import requests
import re
import json
from lxml import etree
from concurrent.futures import ProcessPoolExecutor
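
# Rough shape of the pipeline, as wired below: a 4-worker ProcessPoolExecutor
# downloads listing pages; each finished download triggers parse_data via
# add_done_callback, which chains the next listing page and hands every event
# detail URL to a small 2-worker detail pool, whose results land in
# parse_detail_data and finally in huodong.json.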

def down_load_page_data(req_url):
    """Download one listing page and work out the next page's URL."""
    # Pull page, city_id and shop_id out of the request URL.
    pattern = re.compile(r'.*?page=(\d+).*?city_id=(\d+).*?shop_id=(\d+)')
    result = re.findall(pattern, req_url)[0]
    DATE_SHOW_LOC = result[1]   # city id, passed on as a cookie value
    DATE_SHOW_SHOP = result[2]  # shop id, passed on as a cookie value
    response = download_data(req_url, DATE_SHOW_LOC, DATE_SHOW_SHOP)
    data, next_page = None, None
    if response is not None and response.status_code == 200:
        current_page = int(result[0])
        data = {'page': current_page, 'data': response.text}
        if current_page == 1:
            # The first page is HTML; keep a copy on disk for inspection.
            with open(result[1] + '.html', 'w') as file:
                file.write(response.text)
            next_page = re.sub(r'page=\d+', 'page=' + str(current_page + 1), response.url)
            print('Fetching page ' + str(current_page + 1), DATE_SHOW_LOC, DATE_SHOW_SHOP)
        elif isinstance(json.loads(response.text), list):
            # Pages after the first return JSON: a list while there is data.
            next_page = re.sub(r'page=\d+', 'page=' + str(current_page + 1), response.url)
            print('Fetching page ' + str(current_page + 1), DATE_SHOW_LOC, DATE_SHOW_SHOP)
        else:
            # Anything other than a list means the listing is exhausted.
            print(response.text)
            print('Reached page ' + str(current_page) + ', no more data', DATE_SHOW_LOC, DATE_SHOW_SHOP)
    return data, next_page
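
# The page chaining above is just a regex substitution on the previous
# request's URL, e.g.:
#   re.sub(r'page=\d+', 'page=2',
#          'http://date.jiayuan.com/eventslist_new.php?page=1&city_id=31&shop_id=15')
#   # -> 'http://date.jiayuan.com/eventslist_new.php?page=2&city_id=31&shop_id=15'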

def parse_data(futures):
    """Callback: parse one finished listing page and schedule follow-up work."""
    data, next_page = futures.result()
    if next_page:
        # Chain the next listing page onto the same page pool.
        handler = download_page_pool.submit(down_load_page_data, next_page)
        handler.add_done_callback(parse_data)
    if data is None:
        # The download failed; nothing to parse.
        return
    if data['page'] == 1:
        print('Parsing page 1')
        html_element = etree.HTML(data['data'])
        hot_active = html_element.xpath('//div[@class="hot_detail fn-clear"]')
        download_detail_pool = ProcessPoolExecutor(2)
        for hot_div in hot_active:
            # URL of the event's detail page
            full_detail_url = 'http://date.jiayuan.com' + hot_div.xpath('.//h2[@class="hot_title"]/a/@href')[0]
            handler = download_detail_pool.submit(download_data, full_detail_url)
            handler.add_done_callback(parse_detail_data)
        more_active = html_element.xpath('//ul[@class="review_detail fn-clear t-activiUl"]/li')
        for more_li in more_active:
            # URL of the event's detail page
            full_detail_url = 'http://date.jiayuan.com' + more_li.xpath('.//a[@class="review_link"]/@href')[0]
            handler = download_detail_pool.submit(download_data, full_detail_url)
            handler.add_done_callback(parse_detail_data)
        download_detail_pool.shutdown()
    else:
        # Pages after the first come back as JSON rather than HTML.
        print('Parsing page', data['page'])
        data = json.loads(data['data'])
        if isinstance(data, list):
            # A list means valid event records were returned.
            download_detail_pool = ProcessPoolExecutor(2)
            for sub_dict in data:
                activity_id = sub_dict['id']
                full_detail_url = 'http://date.jiayuan.com/activityreviewdetail.php?id=%s' % activity_id
                handler = download_detail_pool.submit(download_data, full_detail_url)
                handler.add_done_callback(parse_detail_data)
            download_detail_pool.shutdown()
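
# Note (CPython behaviour): add_done_callback callbacks run in the parent
# process once a worker finishes, not inside the worker itself, which is why
# parse_data can reach the global download_page_pool created under __main__.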

def download_data(req_url, DATE_SHOW_LOC=None, DATE_SHOW_SHOP=None):
    """Downloader: fetch a URL with the cookies the site expects."""
    if DATE_SHOW_LOC and DATE_SHOW_SHOP:
        cookie = '_gscu_1380850711=43812116hs5dyy11; accessID=20181222071935501079; PHPSESSID=b59c131c44e32d744ab8ad3bb6e27a45; plat=date_pc; uv_flag=223.72.78.37; user_access=1; DATE_SHOW_LOC=%s; DATE_SHOW_SHOP=%s' % (DATE_SHOW_LOC, DATE_SHOW_SHOP)
    else:
        # Detail-page requests fall back to a fixed city/shop pair.
        cookie = '_gscu_1380850711=43812116hs5dyy11; accessID=20181222071935501079; PHPSESSID=b59c131c44e32d744ab8ad3bb6e27a45; plat=date_pc; uv_flag=223.72.78.37; user_access=1; DATE_SHOW_LOC=4201; DATE_SHOW_SHOP=33'
    req_header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
        'Cookie': cookie,
        'Referer': 'http://date.jiayuan.com/eventslist.php',
    }
    response = requests.get(req_url, headers=req_header)
    if response.status_code == 200:
        print(response.url)
        return response
    return None  # make the failure case explicit instead of an implicit None
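
# Caveat: the PHPSESSID/accessID cookie values above were presumably captured
# from a live browser session and will expire; refresh them from your own
# session before running, or the site may ignore DATE_SHOW_LOC/DATE_SHOW_SHOP.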

def parse_detail_data(futures):
    """Callback: extract one event's fields from its detail page."""
    response = futures.result()
    if response is None:
        return
    html_element = etree.HTML(response.text)
    # Keep the most recent detail page on disk for debugging.
    with open('detail.html', 'w') as file:
        file.write(response.text)
    # Dict holding the scraped fields
    item = {}
    # Event title
    item['title'] = html_element.xpath('//h1[@class="detail_title"]/text()')[0]
    # Event time (join all text nodes of the first info <li>)
    item['time'] = ','.join(html_element.xpath('//div[@class="detail_right fn-left"]/ul[@class="detail_info"]/li[1]//text()'))
    # Event address
    item['address'] = html_element.xpath('//ul[@class="detail_info"]/li[2]/text()')[0]
    # Number of participants
    item['joinnum'] = html_element.xpath('//ul[@class="detail_info"]/li[3]/span[1]/text()')[0]
    # Number of reservations
    item['yuyue'] = html_element.xpath('//ul[@class="detail_info"]/li[3]/span[2]/text()')[0]
    # Event introduction
    item['introduces'] = html_element.xpath('//div[@class="detail_act fn-clear"][1]//p[@class="info_word"]/span[1]/text()')[0]
    # Tips / notes
    item['point'] = html_element.xpath('//div[@class="detail_act fn-clear"][2]//p[@class="info_word"]/text()')[0]
    # Introduction of the store hosting the event
    item['introductionStore'] = ''.join(html_element.xpath('//div[@class="detail_act fn-clear"][3]//p[@class="info_word"]/text()'))
    # Cover image link
    item['coverImage'] = html_element.xpath('//div[@class="detail_left fn-left"]/img/@data-original')[0]
    # Append one JSON object per line (JSON Lines style).
    with open('huodong.json', 'a+') as file:
        json_str = json.dumps(item, ensure_ascii=False) + '\n'
        file.write(json_str)
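
# Each event thus becomes one line of huodong.json, of the (hypothetical) form:
#   {"title": "...", "time": "...", "address": "...", "joinnum": "...",
#    "yuyue": "...", "introduces": "...", "point": "...",
#    "introductionStore": "...", "coverImage": "..."}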

if __name__ == '__main__':
    # Pool that downloads the listing pages.
    download_page_pool = ProcessPoolExecutor(4)
    # Seed URLs: page 1 of the event listing for three cities
    # (city_id=4201 / shop_id=33 is the Wuhan, Hubei endpoint).
    start_urls = [
        'http://date.jiayuan.com/eventslist_new.php?page=1&city_id=31&shop_id=15',
        'http://date.jiayuan.com/eventslist_new.php?page=1&city_id=4201&shop_id=33',
        'http://date.jiayuan.com/eventslist_new.php?page=1&city_id=1501&shop_id=51',
    ]
    for url in start_urls:
        handler = download_page_pool.submit(down_load_page_data, url)
        handler.add_done_callback(parse_data)
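    # One caveat, assuming CPython's default executor behaviour: there is no
    # explicit download_page_pool.shutdown() here, so the script relies on the
    # interpreter waiting for outstanding futures at exit; submits chained very
    # late in parse_data could in principle be rejected during that shutdown.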