Today I started a new crawler project: scraping company information from IT桔子 (www.itjuzi.com/company).
IT桔子 is a structured company database and business-information provider focused on the IT/Internet industry.
It produces, aggregates, mines and processes information and data to help its users and clients save time and money and work more efficiently, supporting activities such as venture capital, acquisitions, competitive intelligence, vertical-industry research, and data services on foreign companies and products.
Define the target data to extract from each detail page (fields declared in items.py):
a. Company profile: company_name; company_slogan; company_link; company_tags
b. Basic company information: company_info; company_full_name; create_time; company_size; company_status
c. Funding history: invest_list (a list in which each element is a dict holding one funding round)
d. Team: team_list (a list in which each element is a dict holding one team member)
e. Products: product_list (a list in which each element is a dict holding one product)
Analyze the site and work out the crawl strategy:
a. All the information lives on the detail pages, so we only need to increment the numeric suffix of the URL;
b. Some pages are static and some are rendered dynamically, so Selenium is used to drive a Chrome browser when needed;
c. The full page is only returned after login, so we either simulate the login or send requests with valid cookies;
d. The HTML structure is not identical across pages, so guard clauses are added to make the parser more robust;
e. Other problems are handled case by case as they come up.
Framework: Scrapy; for the spider module, either the plain Spider or CrawlSpider works.
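The project skeleton used below can be generated with the standard Scrapy commands: scrapy startproject ITJuzi, then scrapy genspider itjuzi itjuzi.com (and scrapy genspider -t crawl juzi_crawl itjuzi.com for the CrawlSpider variant).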
items.py
import scrapy
class JuziItem(scrapy.Item):
    # 1. Company profile
    company_name = scrapy.Field()
    company_slogan = scrapy.Field()
    company_link = scrapy.Field()
    company_tags = scrapy.Field()
    # 2. Basic company information
    company_info = scrapy.Field()
    company_full_name = scrapy.Field()
    create_time = scrapy.Field()
    company_size = scrapy.Field()
    company_status = scrapy.Field()
    # 3. Funding history
    invest_list = scrapy.Field()
    # 4. Team
    team_list = scrapy.Field()
    # 5. Products
    product_list = scrapy.Field()
    url_link = scrapy.Field()
    # Data source and crawl time
    data_source = scrapy.Field()
    data_time = scrapy.Field()
itjuzi.py
# -*- coding: utf-8 -*-
import scrapy
from bs4 import BeautifulSoup
from ITJuzi.items import JuziItem
class JuziSpider(scrapy.Spider):
    name = 'itjuzi'
    allowed_domains = ['itjuzi.com']
    base_url = 'https://www.itjuzi.com/company/'
    offset = 1
    start_urls = [base_url + str(offset)]
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "Cookie": "gr_user_id=8b2a0647-ed6e-4da9-bd79-0927840738ba; _ga=GA1.2.1065816449.1520818726; MEIQIA_EXTRA_TRACK_ID=11oNlg9W4BPRdVJbbc5Mg9covSB; _gid=GA1.2.1051909062.1524629235; acw_tc=AQAAADMxgTrydgkAxrxRZa/yV6lXP/Tv; Hm_lvt_1c587ad486cdb6b962e94fc2002edf89=1524629235,1524637618,1524702648; gr_session_id_eee5a46c52000d401f969f4535bdaa78=5ac2fdfd-b747-46e3-84a3-573d49e8f0f0_true; identity=1019197976%40qq.com; remember_code=N8cv8vX9xK; unique_token=498323; acw_sc__=5ae1302fee977bcf1d5f28b7fe96b94d7b5de97c; session=e12ae81c38e8383dcaeaaff9ded967758bc5a01c; Hm_lpvt_1c587ad486cdb6b962e94fc2002edf89=1524707391",
        "Host": "www.itjuzi.com",
        "If-Modified-Since": "Thu, 26 Apr 2018 01:49:47 GMT",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36",
    }
    # Attach the login cookies to every start request
    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse, headers=self.headers)
    def parse(self, response):
        # Parse the detail page
        soup = BeautifulSoup(response.body, 'lxml')
        item = JuziItem()
        item['url_link'] = response.url
        # 1. Company profile
        # cpy1 = soup.find(attrs={'class': "infoheadrow-v2"})
        cpy1 = soup.find(class_='infoheadrow-v2')
        if cpy1:
            item['company_name'] = cpy1.select('.seo-important-title')[0].get('data-name')
            item['company_slogan'] = cpy1.select('.seo-slogan')[0].get_text()
            item['company_link'] = cpy1.select('.link-line a')[-1].get_text().strip()
            tag_list = cpy1.select('.tag-list a')
            tag_str = ""
            for tag in tag_list:
                tag_str += tag.get_text().strip() + " "
            item['company_tags'] = tag_str
        # 2. Basic company information
        cpy2 = soup.find(class_='block-inc-info')
        if cpy2:
            item['company_info'] = cpy2.select('.block div')[-1].get_text().strip()
            item['company_full_name'] = cpy2.select('.des-more h2')[0].get_text().strip()
            item['create_time'] = cpy2.select('.des-more h3')[0].get_text().strip()
            item['company_size'] = cpy2.select('.des-more h3')[1].get_text().strip()
            item['company_status'] = cpy2.select('.pull-right')[0].get_text().strip()
        # 3. Funding history
        cpy3 = soup.find(attrs={'id': "invest-portfolio"})
        if cpy3:
            tr_list = cpy3.select('tr')
            inv_list = []
            for tr in tr_list:
                # a funding row needs at least four cells: time, round, money, name
                if len(tr.select('td')) >= 4:
                    tr_dict = {}
                    tr_dict['time'] = tr.select('td')[0].get_text().strip()
                    tr_dict['round'] = tr.select('td')[1].get_text().strip()
                    tr_dict['money'] = tr.select('td')[2].get_text().strip()
                    tr_dict['name'] = tr.select('td')[3].get_text().strip()
                    inv_list.append(tr_dict)
            item['invest_list'] = inv_list
        # 4. Team (find() returns None instead of raising when the section is missing)
        cpy4 = soup.find(class_='team-list')
        if cpy4:
            tea_list = cpy4.select('li')
            team_temp_list = []
            for tr in tea_list:
                tr_dict = {}
                tr_dict['name'] = tr.select('.per-name')[0].get_text().strip()
                tr_dict['position'] = tr.select('.per-position')[0].get_text().strip()
                tr_dict['info'] = tr.select('.per-des')[0].get_text().strip()
                team_temp_list.append(tr_dict)
            item['team_list'] = team_temp_list
        # 5. Products (same guard as above)
        cpy5 = soup.find(class_='product-list')
        if cpy5:
            li_list = cpy5.select('li')
            pro_temp_list = []
            for tr in li_list:
                tr_dict = {}
                tr_dict['name'] = tr.select('.product-name')[0].get_text().strip()
                tr_dict['info'] = tr.select('.product-des')[0].get_text().strip()
                pro_temp_list.append(tr_dict)
            item['product_list'] = pro_temp_list
        # Hand the finished item to the engine / pipelines
        yield item
        # Request the next detail page, keeping the login headers
        self.offset += 1
        url = self.base_url + str(self.offset)
        yield scrapy.Request(url=url, callback=self.parse, headers=self.headers)
pipelines.py
from datetime import datetime
class JuziPipeline(object):
    def process_item(self, item, spider):
        item['data_source'] = spider.name
        item['data_time'] = datetime.utcnow()
        return item
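In settings.py this pipeline is registered with priority 300, below RedisPipeline's 400, so every item is stamped with its source spider and crawl time (UTC) before it is serialized into Redis.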
middlewares.py
import time

import scrapy
from selenium import webdriver

class ChromeMiddleware(object):
    def process_request(self, request, spider):
        driver = webdriver.Chrome()
        driver.get(request.url)
        time.sleep(5)
        data = driver.page_source
        driver.quit()
        # Returning a response here intercepts Scrapy's own download and
        # hands back the Selenium-rendered page instead
        return scrapy.http.HtmlResponse(url=request.url, body=data.encode('utf-8'), encoding='utf-8', request=request)
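For Scrapy to route requests through this middleware, it also has to be registered in settings.py. A minimal sketch, assuming the project layout above (the priority value 543 is an arbitrary choice, and chromedriver must be on the PATH):
DOWNLOADER_MIDDLEWARES = {
    'ITJuzi.middlewares.ChromeMiddleware': 543,
}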
settings.py
BOT_NAME = 'ITJuzi'
SPIDER_MODULES = ['ITJuzi.spiders']
NEWSPIDER_MODULE = 'ITJuzi.spiders'
# 1. Use the scrapy-redis (distributed) duplicate filter
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# 2. Use the scrapy-redis scheduler
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# 3. Persist the Redis queues so the crawl can be stopped and resumed
SCHEDULER_PERSIST = True
ITEM_PIPELINES = {
    'ITJuzi.pipelines.JuziPipeline': 300,
    'scrapy_redis.pipelines.RedisPipeline': 400,
}
# 4. Redis host and port
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
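With these settings the first version is started with scrapy crawl itjuzi. Because scrapy-redis is enabled, the request queue, the duplicate-filter fingerprints and the serialized items all live in Redis, by default under keys such as itjuzi:requests, itjuzi:dupefilter and itjuzi:items. A quick way to confirm that items are arriving, as a minimal sketch against the local Redis configured above:
import redis
r = redis.Redis(host='127.0.0.1', port=6379, db=0)
print(r.llen('itjuzi:items'))  # number of items pushed by RedisPipeline
The same parsing logic can also be written with CrawlSpider; the following spider (saved as e.g. juzi_crawl.py) lets link-extraction rules do the page discovery instead of manually incrementing the URL suffix: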
# -*- coding: utf-8 -*-
import scrapy
from bs4 import BeautifulSoup
from ITJuzi.items import JuziItem
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
class JuziSpider(CrawlSpider):
    name = 'juzi_crawl'
    allowed_domains = ['itjuzi.com']
    start_urls = [
        # 1. Domestic startups
        'https://www.itjuzi.com/company',
        # 2. Domestic listed companies
        'https://www.itjuzi.com/company/listed',
        # 3. Foreign startups
        'https://www.itjuzi.com/company/foreign',
        # 4. Foreign listed companies
        'https://www.itjuzi.com/foreign/listed'
    ]
    # Link-extraction rules
    rules = (
        # 1. Domestic startup list pages -- no callback, so follow defaults to True
        Rule(LinkExtractor(allow=r'company\?page=')),
        # 2. Domestic listed list pages
        Rule(LinkExtractor(allow=r'company/listed\?page=')),
        # 3. Foreign startup list pages
        Rule(LinkExtractor(allow=r'company/foreign\?page=')),
        # 4. Foreign listed list pages
        Rule(LinkExtractor(allow=r'company/foreign/listed\?page=')),
        # 5. Detail pages
        Rule(LinkExtractor(allow=r'company/\d+'), callback="parse_detail", follow=False),
    )
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "Cookie": "gr_user_id=8b2a0647-ed6e-4da9-bd79-0927840738ba; _ga=GA1.2.1065816449.1520818726; MEIQIA_EXTRA_TRACK_ID=11oNlg9W4BPRdVJbbc5Mg9covSB; _gid=GA1.2.1051909062.1524629235; acw_tc=AQAAADMxgTrydgkAxrxRZa/yV6lXP/Tv; Hm_lvt_1c587ad486cdb6b962e94fc2002edf89=1524629235,1524637618,1524702648; gr_session_id_eee5a46c52000d401f969f4535bdaa78=5ac2fdfd-b747-46e3-84a3-573d49e8f0f0_true; identity=1019197976%40qq.com; remember_code=N8cv8vX9xK; unique_token=498323; acw_sc__=5ae1302fee977bcf1d5f28b7fe96b94d7b5de97c; session=e12ae81c38e8383dcaeaaff9ded967758bc5a01c; Hm_lpvt_1c587ad486cdb6b962e94fc2002edf89=1524707391",
        "Host": "www.itjuzi.com",
        "If-Modified-Since": "Thu, 26 Apr 2018 01:49:47 GMT",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36",
    }
    # Attach the login cookies to every start request
    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse, headers=self.headers)
    def parse_detail(self, response):
        # Parse the detail page
        soup = BeautifulSoup(response.body, 'lxml')
        item = JuziItem()
        item['url_link'] = response.url
        # 1. Company profile
        # cpy1 = soup.find(attrs={'class': "infoheadrow-v2"})
        cpy1 = soup.find(class_='infoheadrow-v2')
        if cpy1:
            item['company_name'] = cpy1.select('.seo-important-title')[0].get('data-name')
            item['company_slogan'] = cpy1.select('.seo-slogan')[0].get_text()
            item['company_link'] = cpy1.select('.link-line a')[-1].get_text().strip()
            tag_list = cpy1.select('.tag-list a')
            tag_str = ""
            for tag in tag_list:
                tag_str += tag.get_text().strip() + " "
            item['company_tags'] = tag_str
        # 2. Basic company information
        cpy2 = soup.find(class_='block-inc-info')
        if cpy2:
            item['company_info'] = cpy2.select('.block div')[-1].get_text().strip()
            item['company_full_name'] = cpy2.select('.des-more h2')[0].get_text().strip()
            item['create_time'] = cpy2.select('.des-more h3')[0].get_text().strip()
            item['company_size'] = cpy2.select('.des-more h3')[1].get_text().strip()
            item['company_status'] = cpy2.select('.pull-right')[0].get_text().strip()
        # 3. Funding history
        cpy3 = soup.find(attrs={'id': "invest-portfolio"})
        if cpy3:
            tr_list = cpy3.select('tr')
            inv_list = []
            for tr in tr_list:
                # a funding row needs at least four cells: time, round, money, name
                if len(tr.select('td')) >= 4:
                    tr_dict = {}
                    tr_dict['time'] = tr.select('td')[0].get_text().strip()
                    tr_dict['round'] = tr.select('td')[1].get_text().strip()
                    tr_dict['money'] = tr.select('td')[2].get_text().strip()
                    tr_dict['name'] = tr.select('td')[3].get_text().strip()
                    inv_list.append(tr_dict)
            item['invest_list'] = inv_list
        # 4. Team (find() returns None instead of raising when the section is missing)
        cpy4 = soup.find(class_='team-list')
        if cpy4:
            tea_list = cpy4.select('li')
            team_temp_list = []
            for tr in tea_list:
                tr_dict = {}
                tr_dict['name'] = tr.select('.per-name')[0].get_text().strip()
                tr_dict['position'] = tr.select('.per-position')[0].get_text().strip()
                tr_dict['info'] = tr.select('.per-des')[0].get_text().strip()
                team_temp_list.append(tr_dict)
            item['team_list'] = team_temp_list
        # 5. Products (same guard as above)
        cpy5 = soup.find(class_='product-list')
        if cpy5:
            li_list = cpy5.select('li')
            pro_temp_list = []
            for tr in li_list:
                tr_dict = {}
                tr_dict['name'] = tr.select('.product-name')[0].get_text().strip()
                tr_dict['info'] = tr.select('.product-des')[0].get_text().strip()
                pro_temp_list.append(tr_dict)
            item['product_list'] = pro_temp_list
        # Hand the finished item to the engine / pipelines
        yield item
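The CrawlSpider version is started the same way (scrapy crawl juzi_crawl): the first four rules follow the pagination links on the list pages, and the fifth rule hands every company/<id> detail page to parse_detail.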
Distributed version (scrapy-redis), based on the plain Spider module.
Steps: switch the spider to RedisSpider and enable the scrapy-redis components in settings.py:
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_PERSIST = True
'scrapy_redis.pipelines.RedisPipeline': 400,  # added to ITEM_PIPELINES
itjuzi_redis.py
...
from scrapy_redis.spiders import RedisSpider
class JuziSpider(RedisSpider):
    name = 'juzi_redis'
    allowed_domains = ['itjuzi.com']
    redis_key = 'juzikey'
...
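A RedisSpider has no start_urls; it waits until a start URL is pushed onto its redis_key list. A minimal sketch for seeding the crawl, assuming the Redis host configured below and using an arbitrary first detail page as the seed:
import redis
r = redis.Redis(host='192.168.90.169', port=6379, db=0)
r.lpush('juzikey', 'https://www.itjuzi.com/company/1')  # workers running juzi_redis pop start URLs from this list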
settings.py
...
# 1. Enable the distributed (scrapy-redis) duplicate filter
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# 2. Enable the distributed scheduler
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# 3. Persist the queues: if the crawl stops after, say, request 1000, the next run resumes at 1001
SCHEDULER_PERSIST = True
# 4. Redis item pipeline
ITEM_PIPELINES = {
    'scrapy_redis.pipelines.RedisPipeline': 400,
}
# Redis host and port
REDIS_HOST = '192.168.90.169'
REDIS_PORT = 6379
...
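At this point the crawled items only accumulate in Redis. The two helper scripts below pop them off the items list and write them into MongoDB or MySQL; note that they come from an air-quality (aqi) crawl, so the aqi database/collection names, the aqi:items key and the field names would need to be adapted to this project's items. Start MongoDB first: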
sudo mongod
# process_aqi_mongodb.py
# -*- coding: utf-8 -*-
import json
import redis
import pymongo
def main():
    # Redis connection
    rediscli = redis.Redis(host='192.168.88.94', port=6379, db=0)
    # MongoDB connection
    mongocli = pymongo.MongoClient(host='127.0.0.1', port=27017)
    # database
    db = mongocli['aqi']
    # collection
    sheet = db['aqi_data']
    while True:
        # blpop pops in FIFO order (use brpop for LIFO); it blocks until an item arrives
        source, data = rediscli.blpop(["aqi:items"])
        item = json.loads(data)
        sheet.insert(item)  # insert_one() in newer pymongo versions
        try:
            print u"Processing: %(name)s <%(link)s>" % item
        except KeyError:
            print u"Error processing: %r" % item

if __name__ == '__main__':
    main()
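The script runs until it is killed: blpop blocks while aqi:items is empty and wakes up as soon as a spider pushes a new item, so it can simply be left running alongside the crawl.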
mysql-server start
mysql -uroot -p    (the exact start/login commands differ by platform)
create database aqi;
use aqi
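The MySQL consumer below assumes an aqi_data table already exists. A minimal sketch for creating it; the column types are assumptions inferred from the INSERT statement, and on MySQL 8+ the rank column name is reserved and would need backticks here and in the INSERT:
# create_aqi_table.py  -- hypothetical helper, column types are assumptions
import MySQLdb

conn = MySQLdb.connect(host='127.0.0.1', user='root', passwd='xxxxxxx', db='aqi', port=3306)
cur = conn.cursor()
cur.execute("""CREATE TABLE IF NOT EXISTS aqi_data (
    id INT AUTO_INCREMENT PRIMARY KEY,
    city VARCHAR(64), date VARCHAR(32), aqi VARCHAR(16), level VARCHAR(32),
    pm2_5 VARCHAR(16), pm10 VARCHAR(16), so2 VARCHAR(16), co VARCHAR(16),
    no2 VARCHAR(16), o3 VARCHAR(16), rank VARCHAR(16),
    spider VARCHAR(64), crawled VARCHAR(32)
)""")
conn.commit()
cur.close()
conn.close()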
#process_aqi_mysql.py
# -*- coding: utf-8 -*-
import json
import redis
import MySQLdb
def main():
    # Redis connection
    rediscli = redis.StrictRedis(host='192.168.88.94', port=6379, db=0)
    # MySQL connection
    mysqlcli = MySQLdb.connect(host='127.0.0.1', user='root', passwd='xxxxxxx', db='aqi', port=3306, use_unicode=True)
    while True:
        # blpop pops in FIFO order (use brpop for LIFO); it blocks until an item arrives
        source, data = rediscli.blpop(["aqi:items"])
        item = json.loads(data)
        try:
            # get a cursor
            cur = mysqlcli.cursor()
            # run the INSERT
            cur.execute("INSERT INTO aqi_data (city, date, aqi, level, pm2_5, pm10, so2, co, no2, o3, rank, spider, crawled) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", [item['city'], item['date'], item['aqi'], item['level'], item['pm2_5'], item['pm10'], item['so2'], item['co'], item['no2'], item['o3'], item['rank'], item['spider'], item['crawled']])
            # commit the transaction
            mysqlcli.commit()
            # close the cursor
            cur.close()
        except MySQLdb.Error, e:
            print "Mysql Error %d: %s" % (e.args[0], e.args[1])

if __name__ == '__main__':
    main()
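Both consumer scripts are Python 2 code (print statements and the old except ..., e syntax) and rely on the redis, pymongo and MySQL-python (the package that provides MySQLdb) libraries.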