Scrapy is a powerful open-source Python framework for crawling website data. It provides a high-level toolkit for scraping and data extraction that lets you build and scale web crawlers quickly and flexibly.
Some of Scrapy's main advantages: asynchronous, high-throughput crawling built on Twisted; built-in XPath/CSS selectors for data extraction; and an extensible architecture of downloader/spider middlewares, item pipelines, and extensions.
Some of Scrapy's drawbacks: a steeper learning curve than simple requests-based scripts, and no JavaScript rendering out of the box (dynamic pages need Selenium, Splash, or a similar integration).
pip install scrapy
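Creating the project and generating a spider follow the same steps shown later for the Selenium project; the commands below assume the project and spider names used in this article:
```
scrapy startproject scrapySpider
cd scrapySpider
scrapy genspider douban movie.douban.com
```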
```
|-- scrapySpider              project directory
|   |-- scrapySpider          project package directory
|   |   |-- spiders           spider directory
|   |   |   |-- douban.py     spider file
|   |   |-- items.py          data models, similar to a database table schema or data-structure definition
|   |   |-- middlewares.py    middlewares that process and modify requests and responses
|   |   |-- pipelines.py      item pipelines, the components that process the scraped data
|   |   |-- settings.py       configuration file for the Scrapy project's settings and options
|   |-- scrapy.cfg            framework configuration file specifying project structure and metadata
```
Settings documentation: https://docs.scrapy.org/en/latest/topics/settings.html
Item pipelines must be enabled in settings.py through the ITEM_PIPELINES setting.
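As a minimal sketch, the pipelines defined later in this article are enabled like this (the numbers are priorities; lower values run first):
```
# settings.py
ITEM_PIPELINES = {
    "scrapySpider.pipelines.MysqlPipeLine": 300,
    "scrapySpider.pipelines.MongoPipeLine": 301,
}
```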
A spider can also be started from a Python script instead of the command line:
```
from scrapy import cmdline

cmdline.execute('scrapy crawl douban'.split())
```
Special keys recognized in Request.meta (see the Scrapy docs for the full list):
```
request.meta['proxy']                   # proxy server to use for this request
request.meta['dont_redirect']           # if True, disable automatic redirects for this request
request.meta['dont_retry']              # if True, disable automatic retries for this request
request.meta['handle_httpstatus_list']  # HTTP status codes the spider will handle itself
request.meta['handle_httpstatus_all']   # if True, pass every response to the spider regardless of status code
request.meta['cookiejar']               # cookie jar (session) to use for this request
request.meta['dont_merge_cookies']      # if True, do not merge this request's cookies with the session
request.meta['dont_cache']              # if True, do not cache the response (HTTP cache middleware)
request.meta['download_timeout']        # download timeout in seconds for this request
request.meta['download_slot']           # download slot used for this request (set by the downloader)
request.meta['download_latency']        # time spent downloading the response (set by the downloader)
request.meta['max_retry_times']         # maximum number of retries for this request
request.meta['retry_times']             # retries performed so far (set by the retry middleware)
request.meta['redirect_urls']           # URLs the request passed through (set by the redirect middleware)
request.meta['redirect_reasons']        # reason for each redirect (set by the redirect middleware)
```
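As an illustration (not from the original article), a spider can set these keys per request and read back the values that built-in middlewares store; the spider name and URL below are placeholders:
```
import scrapy

class MetaDemoSpider(scrapy.Spider):
    # Hypothetical spider used only to illustrate Request.meta
    name = 'meta_demo'

    def start_requests(self):
        yield scrapy.Request(
            url='https://example.com',
            callback=self.parse,
            meta={
                # 'proxy': 'http://user:password@host:port',  # uncomment to route through a proxy
                'download_timeout': 10,                # seconds
                'handle_httpstatus_list': [403, 404],  # let parse() handle these codes itself
            },
        )

    def parse(self, response):
        # Values written by built-in middlewares can be read back from response.meta
        self.logger.info('download latency: %s', response.meta.get('download_latency'))
        self.logger.info('redirect chain: %s', response.meta.get('redirect_urls'))
```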
1. Job monitoring with ScrapeOps: https://scrapeops.io/app/jobs
Install the ScrapeOps SDK (pip install scrapeops-scrapy), then add the following to settings.py:
```
## settings.py
## Your ScrapeOps API key
SCRAPEOPS_API_KEY = 'YOUR_API_KEY'

## Add the ScrapeOps extension
EXTENSIONS = {
    'scrapeops_scrapy.extension.ScrapeOpsMonitor': 500,
}

## Update the downloader middlewares
DOWNLOADER_MIDDLEWARES = {
    'scrapeops_scrapy.middleware.retry.RetryMiddleware': 550,
}
```
2. Email notification when a crawl finishes, using the built-in StatsMailer extension. In settings.py:
```
EXTENSIONS = {
    'scrapy.extensions.statsmailer.StatsMailer': 500,
}
STATSMAILER_RCPTS = ['your email address']
MAIL_FROM = 'your email address'
MAIL_HOST = 'smtp.qq.com'
MAIL_PORT = 465
MAIL_USER = 'your email address'
# The password/authorization code issued by the SMTP service
MAIL_PASS = ''
MAIL_SSL = True
```
A spider can also send a mail itself when it closes, by overriding close() and using MailSender:
```
import scrapy
import scrapy.cmdline
from scrapy.mail import MailSender

class SeleniumTxWorkSpider(scrapy.Spider):
    ....
    def close(self, spider, reason):
        mail_sender = MailSender.from_settings(spider.settings)
        return mail_sender.send(to=spider.settings['STATSMAILER_RCPTS'], subject='python', body='spider finished')

if __name__ == '__main__':
    scrapy.cmdline.execute('scrapy crawl seleniumTxWork'.split())
```
5. Create the Kuaidaili proxy helper file scrapySpider>kuaidaili.py (https://www.kuaidaili.com/):
```
import requests

class Kuaidaili():
    request_url = {
        # Endpoint that returns the proxy IP signature
        'getIpSignature': 'https://auth.kdlapi.com/api/get_secret_token',
        # Endpoint that returns a proxy IP
        'getIp': 'https://dps.kdlapi.com/api/getdps?secret_id=oy2q5xu76k4s8olx59et&num=1&signature={}'
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
    }
    ip_use = 'proxy account username'
    ip_password = 'proxy account password'

    def __init__(self):
        '''Create a requests session object'''
        self.request_session = requests.Session()
        self.request_session.headers.update(self.headers)

    # Get the proxy IP signature
    @classmethod
    def get_ip_url(cls):
        par = {
            'secret_id': 'oy2q5xu76k4s8olx59et',
            'secret_key': '5xg6gvouc0vszfw0kxs1a8vrw1r6ity7'
        }
        response = requests.post(cls.request_url['getIpSignature'], data=par)
        response_data = response.json()
        return cls.request_url['getIp'].format(response_data['data']['secret_token'])

    @classmethod
    def get_ip(cls):
        url = cls.get_ip_url()
        response = requests.get(url)
        return f'http://{cls.ip_use}:{cls.ip_password}@{response.text}/'

if __name__ == '__main__':
    kuaidaili = Kuaidaili()
    print(kuaidaili.get_ip())
```
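The URL returned by get_ip() is in the usual user:password@host:port form, so it can be dropped straight into a requests proxies mapping. A small illustrative check (assuming valid Kuaidaili credentials; not part of the original article):
```
import requests
from scrapySpider.kuaidaili import Kuaidaili

proxy = Kuaidaili.get_ip()
# httpbin.org/ip echoes the IP it sees, so the output should show the proxy's address
print(requests.get('https://httpbin.org/ip', proxies={'http': proxy, 'https': proxy}, timeout=10).text)
```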
6. Douban Top 250 scraping example
douban.py:
```
import scrapy
from scrapy import cmdline
from scrapy.http import HtmlResponse, Request
from scrapySpider.items import DoubanItem

class DoubanSpider(scrapy.Spider):
    name = 'douban'
    allowed_domains = ['movie.douban.com']
    start_urls = ['https://movie.douban.com/top250']

    def parse(self, response: HtmlResponse, **kwargs):
        video_list = response.xpath('//ol[@class="grid_view"]/li')
        for li in video_list:
            item = DoubanItem()
            item['title'] = li.xpath('.//div[@class="hd"]/a/span[1]/text()').extract_first()
            item['rating'] = li.xpath('.//div[@class="bd"]//span[@class="rating_num"]/text()').extract_first()
            item['quote'] = li.xpath('.//div[@class="bd"]//p[@class="quote"]/span/text()').extract_first()
            detail_url = li.xpath('.//div[@class="hd"]/a/@href').extract_first()
            yield Request(url=detail_url, callback=self.get_detail_info, meta={'item': item})
        # Get the next page
        next_page_url = response.xpath('//div[@class="paginator"]//link[@rel="next"]/@href').extract_first()
        if next_page_url:
            yield Request(url=response.urljoin(next_page_url), callback=self.parse)

    # Alternatively, override start_requests to fetch several pages
    # def start_requests(self):
    #     for i in range(0, 2):
    #         yield Request(url=f'{self.start_urls[0]}?start={i*25}&filter=', dont_filter=True, callback=self.parse)

    def get_detail_info(self, response: HtmlResponse):
        item = response.meta['item']
        detail = response.xpath('//span[@class="all hidden"]/text()').extract_first()
        if not detail:
            detail = response.xpath('//div[@id="link-report-intra"]/span[1]/text()').extract_first()
        item['intro'] = detail.strip()
        return item

if __name__ == '__main__':
    cmdline.execute('scrapy crawl douban'.split())
```
settings.py:
```
# Scrapy settings for scrapySpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = "scrapySpider"
SPIDER_MODULES = ["scrapySpider.spiders"]
NEWSPIDER_MODULE = "scrapySpider.spiders"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en",
}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    "scrapySpider.middlewares.ScrapyspiderSpiderMiddleware": 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    "scrapySpider.middlewares.DoubanDownloaderMiddleware": 543,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#     'scrapeops_scrapy.extension.ScrapeOpsMonitor': 500,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    "scrapySpider.pipelines.MysqlPipeLine": 300,
    "scrapySpider.pipelines.MongoPipeLine": 301,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = "httpcache"
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"
# Logging configuration
# LOG_FILE = 'log.log'
# LOG_FILE_APPEND = False
# LOG_LEVEL = 'INFO'
```
items.py:
```
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy

class DoubanItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    rating = scrapy.Field()
    quote = scrapy.Field()
    intro = scrapy.Field()
```
pipelines.py:
```
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql
import pymongo

video_spider = ['douban']

class DoubanPipeline:
    def process_item(self, item, spider):
        print(item)
        return item

class MysqlPipeLine:

    def open_spider(self, spider):
        self.spider = spider
        self.mysql = pymysql.connect(host='localhost', port=3306, user='root', password='root')
        self.cursor = self.mysql.cursor()
        # Create the video database and the table for this spider
        if self.spider.name in video_spider:
            self.create_db('video')

    '''Create the database'''
    def create_db(self, db_name):
        sql = f'''CREATE DATABASE IF NOT EXISTS {db_name}'''
        try:
            self.cursor.execute(sql)
            self.mysql.select_db(db_name)
            if self.spider.name == 'douban':
                self.create_douban_table()
        except Exception as e:
            print(f'Failed to create database {db_name}: {e}')

    '''Create the douban table'''
    def create_douban_table(self):
        sql = f'''
        CREATE TABLE IF NOT EXISTS {self.spider.name}(
            id INT AUTO_INCREMENT,
            title VARCHAR(255),
            rating FLOAT,
            quote VARCHAR(255),
            intro TEXT,
            PRIMARY KEY(id)
        )
        '''
        try:
            self.cursor.execute(sql)
        except Exception as e:
            print(f'Failed to create table douban: {e}')

    def process_item(self, item, spider):
        if spider.name == 'douban':
            sql = f'''INSERT INTO {spider.name}(title,rating,quote,intro) VALUES(%(title)s,%(rating)s,%(quote)s,%(intro)s)'''
            try:
                item['rating'] = float(item['rating'])
                self.cursor.execute(sql, dict(item))
                self.mysql.commit()
            except Exception as e:
                print(f'Failed to insert "{item["title"]}": {e}')
                self.mysql.rollback()
        return item

    def close_spider(self, spider):
        self.mysql.close()

class MongoPipeLine:

    def open_spider(self, spider):
        self.spider = spider
        self.mongo = pymongo.MongoClient(host='localhost', port=27017)
        # Use the video database and a collection named after this spider
        if self.spider.name in video_spider:
            self.cursor = self.mongo['video'][self.spider.name]

    def process_item(self, item, spider):
        try:
            self.cursor.insert_one(dict(item))
        except Exception as e:
            print(f'Failed to insert "{item["title"]}": {e}')
        return item

    def close_spider(self, spider):
        self.mongo.close()
```
middlewares.py:
```
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
from fake_useragent import UserAgent
from scrapy.http import Request, HtmlResponse
from scrapySpider.kuaidaili import Kuaidaili

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter

class DoubanDownloaderMiddleware:

    def __init__(self):
        self.ua = UserAgent()
        self.kuaidaili = Kuaidaili()
        # Fetch an initial proxy IP
        self.first_ip = self.kuaidaili.get_ip()

    @classmethod
    def from_crawler(cls, crawler):
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request: Request, spider):
        # Set a random User-Agent
        request.headers['User-Agent'] = self.ua.random
        # Set the proxy
        request.meta['proxy'] = self.first_ip
        request.meta['download_timeout'] = 5
        spider.logger.info(f'ip:{request.meta["proxy"]}')
        return None

    def process_response(self, request, response: HtmlResponse, spider):
        spider.logger.info(f'ip:{request.meta["proxy"]}')
        if response.status == 200:
            return response
        # The proxy has likely expired: fetch a new one and return the request to retry it
        request.meta['proxy'] = self.kuaidaili.get_ip()
        request.meta['download_timeout'] = 2
        return request

    def spider_opened(self, spider):
        spider.logger.info(f'Spider opened: {spider.name}')
```
The second example uses Selenium inside a downloader middleware to render JavaScript pages (Tencent careers).
1. Create the project: scrapy startproject seleniumScrapySpider
2. Enter the project directory: cd .\seleniumScrapySpider
3. Create the spider: scrapy genspider seleniumTxWork careers.tencent.com
4. Directory structure
```
|-- seleniumScrapySpider              project directory
|   |-- seleniumScrapySpider          project package directory
|   |   |-- spiders                   spider directory
|   |   |   |-- seleniumTxWork.py     spider file
|   |   |-- items.py                  data models, similar to a database table schema or data-structure definition
|   |   |-- middlewares.py            middlewares that process and modify requests and responses
|   |   |-- pipelines.py              item pipelines, the components that process the scraped data
|   |   |-- settings.py               configuration file for the Scrapy project's settings and options
|   |-- scrapy.cfg                    framework configuration file specifying project structure and metadata
```
5. Make the Chrome driver available globally by copying the chromedriver executable into the miniconda root directory (or anywhere on PATH); without it the program fails with "'chromedriver' executable needs to be in PATH".
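If you would rather not touch PATH, Selenium 4 also lets you point at the driver explicitly; a minimal sketch, with a placeholder path:
```
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.chrome.service import Service

# Placeholder path; point it at your local chromedriver binary
service = Service(executable_path=r'C:\tools\chromedriver.exe')
browser = Chrome(service=service, options=ChromeOptions())
```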
6. Create the Kuaidaili proxy helper file seleniumScrapySpider>kuaidaili.py (https://www.kuaidaili.com/):
```
import requests

class Kuaidaili():
    request_url = {
        # Endpoint that returns the proxy IP signature
        'getIpSignature': 'https://auth.kdlapi.com/api/get_secret_token',
        # Endpoint that returns a proxy IP
        'getIp': 'https://dps.kdlapi.com/api/getdps?secret_id=oy2q5xu76k4s8olx59et&num=1&signature={}'
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
    }
    ip_use = 'proxy account username'
    ip_password = 'proxy account password'

    def __init__(self):
        '''Create a requests session object'''
        self.request_session = requests.Session()
        self.request_session.headers.update(self.headers)

    # Get the proxy IP signature
    @classmethod
    def get_ip_url(cls):
        par = {
            'secret_id': 'oy2q5xu76k4s8olx59et',
            'secret_key': '5xg6gvouc0vszfw0kxs1a8vrw1r6ity7'
        }
        response = requests.post(cls.request_url['getIpSignature'], data=par)
        response_data = response.json()
        return cls.request_url['getIp'].format(response_data['data']['secret_token'])

    @classmethod
    def get_ip(cls):
        url = cls.get_ip_url()
        response = requests.get(url)
        return f'http://{cls.ip_use}:{cls.ip_password}@{response.text}/'

if __name__ == '__main__':
    kuaidaili = Kuaidaili()
    print(kuaidaili.get_ip())
```
7. Tencent careers scraping example
seleniumTxWork.py:
```
import scrapy
import scrapy.cmdline
from scrapy.http import HtmlResponse, Request
from seleniumScrapySpider.items import SeleniumScrapySpiderItem
import loguru
import re

class SeleniumTxWorkSpider(scrapy.Spider):
    name = "seleniumTxWork"
    allowed_domains = ["careers.tencent.com"]
    start_urls = ["https://careers.tencent.com/search.html?index={}&keyword=python"]

    def parse(self, response: HtmlResponse, **kwargs):
        work_list = response.xpath('//div[@class="recruit-list"]')
        reg = re.compile(r'\n|\s')
        for work in work_list:
            work_item = SeleniumScrapySpiderItem()
            work_item['workName'] = re.sub(reg, '', work.xpath('.//span[@class="job-recruit-title"]/text()').extract_first())
            work_item['workAddress'] = re.sub(reg, '', work.xpath('.//span[@class="job-recruit-location"]/text()').extract_first())
            work_item['workResponsibility'] = re.sub(reg, '', work.xpath('.//p[@class="recruit-text"]/text()').extract_first())
            work_item['workYearsName'] = re.sub(reg, '', work.xpath('.//p[@class="recruit-tips"]/span[5]/text()').extract_first())
            yield work_item
        '''Use the total page count to request the remaining pages'''
        page_total = int(response.xpath('//ul[@class="page-list"]/li[last() - 1]/span/text()').extract_first())
        active_page = int(response.xpath('//ul[@class="page-list"]/li[@class="page-li active"]/span/text()').extract_first())
        if page_total != active_page:
            active_page += 1
            loguru.logger.info(f'****************** {page_total} pages in total, fetching page {active_page} ******************')
            yield Request(url=self.start_urls[0].format(active_page))
        else:
            loguru.logger.info('****************** all pages have been fetched ******************')

    def start_requests(self):
        loguru.logger.info('****************** fetching page 1 ******************')
        yield Request(url=self.start_urls[0].format(1))
        '''Alternatively, fetch a fixed number of pages'''
        # for i in range(1, 6):
        #     loguru.logger.info(f'****************** fetching page {i} ******************')
        #     yield Request(url=self.start_urls[0].format(i))

if __name__ == '__main__':
    scrapy.cmdline.execute('scrapy crawl seleniumTxWork'.split())
```
settings.py:
```
# Scrapy settings for seleniumScrapySpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = "seleniumScrapySpider"
SPIDER_MODULES = ["seleniumScrapySpider.spiders"]
NEWSPIDER_MODULE = "seleniumScrapySpider.spiders"
# Replace with your own ScrapeOps API key
SCRAPEOPS_API_KEY = 'b0a936d4-762b-4270-9e87-a2883c2a683e'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "seleniumScrapySpider (+http://www.yourdomain.com)"
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en",
}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#     "seleniumScrapySpider.middlewares.SeleniumscrapyspiderSpiderMiddleware": 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    "seleniumScrapySpider.middlewares.SeleniumDownloaderMiddleware": 543,
    'scrapeops_scrapy.middleware.retry.RetryMiddleware': 550,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
EXTENSIONS = {
    'scrapeops_scrapy.extension.ScrapeOpsMonitor': 500,
}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    "seleniumScrapySpider.pipelines.MysqlPipeLine": 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = "httpcache"
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"
# Logging configuration
LOG_FILE = 'log.log'
LOG_FILE_APPEND = False
LOG_LEVEL = 'INFO'
```
items.py:
```
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy

class SeleniumScrapySpiderItem(scrapy.Item):
    workName = scrapy.Field()
    workAddress = scrapy.Field()
    workResponsibility = scrapy.Field()
    workYearsName = scrapy.Field()
```
pipelines.py:
```
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql
import loguru

class MysqlPipeLine:

    def open_spider(self, spider):
        self.spider = spider
        self.mysql = pymysql.connect(host='localhost', port=3306, user='root', password='root')
        self.cursor = self.mysql.cursor()
        self.create_db('job')

    '''Create the database'''
    def create_db(self, db_name):
        sql = f'''CREATE DATABASE IF NOT EXISTS {db_name}'''
        try:
            self.cursor.execute(sql)
            self.mysql.select_db(db_name)
            self.create_job_table()
        except Exception as e:
            loguru.logger.info(f'Failed to create database {db_name}: {e}')

    '''Create the table'''
    def create_job_table(self):
        sql = '''
        CREATE TABLE IF NOT EXISTS txWork(
            workId INT AUTO_INCREMENT,
            workName VARCHAR(255),
            workAddress VARCHAR(255),
            workResponsibility TEXT,
            workYearsName VARCHAR(255),
            PRIMARY KEY(workId)
        )
        '''
        try:
            self.cursor.execute(sql)
        except Exception as e:
            loguru.logger.info(f'Failed to create table txWork: {e}')

    def process_item(self, item, spider):
        sql = '''INSERT INTO txWork(workName,workAddress,workResponsibility,workYearsName) VALUES(%(workName)s,%(workAddress)s,%(workResponsibility)s,%(workYearsName)s)'''
        try:
            self.cursor.execute(sql, dict(item))
            self.mysql.commit()
            loguru.logger.info(f'"{item["workName"]}" inserted successfully')
        except Exception as e:
            loguru.logger.info(f'Failed to insert "{item["workName"]}": {e}')
            self.mysql.rollback()
        return item

    def close_spider(self, spider):
        self.mysql.close()
```
middlewares.py:
```
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from fake_useragent import UserAgent
from seleniumScrapySpider.kuaidaili import Kuaidaili
import loguru

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter

class SeleniumDownloaderMiddleware:

    def __init__(self):
        chrome_options = ChromeOptions()
        chrome_options.add_experimental_option('detach', True)  # keep the browser open after the script finishes
        chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])  # hide the "controlled by automated software" banner
        chrome_options.add_experimental_option('useAutomationExtension', False)  # do not load the automation extension
        chrome_options.add_argument('--start-maximized')  # start with a maximized window
        self.browser = Chrome(options=chrome_options)
        self.ua = UserAgent()
        self.kuaidaili = Kuaidaili()
        # Fetch an initial proxy IP
        self.first_ip = self.kuaidaili.get_ip()

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def process_request(self, request, spider):
        # Set a random User-Agent
        request.headers['User-Agent'] = self.ua.random
        # Set the proxy
        request.meta['proxy'] = self.first_ip
        request.meta['download_timeout'] = 5
        return None

    def process_response(self, request, response, spider):
        loguru.logger.info(f'proxy ip: {request.meta["proxy"]}')
        if response.status == 200:
            self.browser.get(request.url)
            wait = WebDriverWait(self.browser, 10)  # wait up to 10 seconds
            wait.until(expected_conditions.presence_of_element_located((By.CLASS_NAME, 'recruit-list')))  # wait until an element with class "recruit-list" is present
            return HtmlResponse(url=request.url, body=self.browser.page_source, encoding='utf-8', request=request)
        # The proxy has likely expired: fetch a new one and return the request to retry it
        request.meta['proxy'] = self.kuaidaili.get_ip()
        request.meta['download_timeout'] = 2
        return request

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_closed(self, spider):
        self.browser.close()
```