赞
踩
1、动态数据抓包演示
2、json数据解析
3、requests模块的使用
4、保存csv
安装命令:requests >>> pip install requests
通过开发者工具进行抓包分析, 分析自己想要数据内容 可以从哪里获取
分析数据 从第二页开始
用代码去模拟浏览器发送请求获取数据
import requests # 数据请求模块
import pprint # 格式化输出模块
import csv # 内置模块
import time
import re
def get_shop_info(html_url):
    """Fetch a Meituan shop detail page and extract contact fields.

    Parameters
    ----------
    html_url : str
        Detail-page URL, e.g. 'https://www.meituan.com/xiuxianyule/193306807/'.

    Returns
    -------
    list[str]
        [phone, openTime, address]; a field is '' when the pattern is not
        found in the page, instead of raising IndexError and aborting the
        whole crawl (the original code indexed [0] unconditionally).
    """
    headers = {
        'Cookie': '_lxsdk_cuid=17e102d3914c8-000093bbbb0ed8-4303066-1fa400-17e102d3914c8; __mta=48537241.1640948906361.1640948906361.1640948906361.1; _hc.v=e83bebb5-d6ee-d90e-dd4b-4f2124f8f982.1640951715; ci=70; rvct=70; mt_c_token=2Tmbj8_Qihel3QR9oEXS4nEpnncAAAAABBEAAB9N2m2JXSE0N6xtRrgG6ikfQZQ3NBdwyQdV9vglW8XGMaIt38Lnu1_89Kzd0vMKEQ; iuuid=3C2110909379198F1809F560B5E33A58B83485173D8286ECD2C7F8AFFCC724B4; isid=2Tmbj8_Qihel3QR9oEXS4nEpnncAAAAABBEAAB9N2m2JXSE0N6xtRrgG6ikfQZQ3NBdwyQdV9vglW8XGMaIt38Lnu1_89Kzd0vMKEQ; logintype=normal; cityname=%E9%95%BF%E6%B2%99; _lxsdk=3C2110909379198F1809F560B5E33A58B83485173D8286ECD2C7F8AFFCC724B4; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; latlng=28.302546%2C112.868692; ci3=70; uuid=f7c4d3664ab34f13ad7f.1650110501.1.0.0; mtcdn=K; lt=9WbeLmhHHLhTVpnVu264fUCMYeIAAAAAQREAAKnrFL00wW5eC7mPjhHwIZwkUL11aa7lM7wOfgoO53f0uJpjKSRpO6LwCBDd9Fm-wA; u=266252179; n=qSP946594369; token2=9WbeLmhHHLhTVpnVu264fUCMYeIAAAAAQREAAKnrFL00wW5eC7mPjhHwIZwkUL11aa7lM7wOfgoO53f0uJpjKSRpO6LwCBDd9Fm-wA; unc=qSP946594369; firstTime=1650118043342; _lxsdk_s=18032a80c4c-4d4-d30-e8f%7C%7C129',
        'Referer': 'https://chs.meituan.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.88 Safari/537.36'
    }
    response = requests.get(url=html_url, headers=headers)

    def _first_match(pattern):
        # Return the first capture group or '' when the field is missing,
        # so one incomplete page does not crash the crawler.
        found = re.findall(pattern, response.text)
        return found[0] if found else ''

    phone = _first_match('"phone":"(.*?)"')
    # The page embeds the two characters backslash + 'n' (an escaped \n,
    # not a real newline) inside openTime; strip that literal sequence.
    openTime = _first_match('"openTime":"(.*?)"').replace('\\n', '')
    address = _first_match('"address":"(.*?)"')
    return [phone, openTime, address]


# Output file: mode='a' appends across runs; newline='' stops the csv module
# from emitting blank lines on Windows; encoding fixed to utf-8.
f = open('究极无敌男人秘密最终版.csv', mode='a', encoding='utf-8', newline='')
csv_writer = csv.DictWriter(f, fieldnames=[
    '店名', '人均消费', '最低消费', '商圈', '店铺类型', '评分',
    '电话', '营业时间', '地址', '纬度', '经度', '详情页',
])
# Write the header only when the file is empty: the original code called
# writeheader() unconditionally, appending a duplicate header row on every
# rerun of the script (mode='a' positions the stream at end-of-file).
if f.tell() == 0:
    csv_writer.writeheader()
# Crawl the search API page by page: offsets 0, 32, 64, ..., 320
# (32 results per page, matching the 'limit' parameter below).
for page in range(0, 321, 32):
    time.sleep(1.5)  # throttle requests to reduce the risk of being blocked
    url = 'https://apimobile.meituan.com/group/v4/poi/pcsearch/70'
    data = {
        'uuid': 'f7c4d3664ab34f13ad7f.1650110501.1.0.0',
        'userid': '266252179',
        'limit': '32',
        'offset': page,
        'cateId': '-1',
        'q': '会所',
        'token': '9WbeLmhHHLhTVpnVu264fUCMYeIAAAAAQREAAKnrFL00wW5eC7mPjhHwIZwkUL11aa7lM7wOfgoO53f0uJpjKSRpO6LwCBDd9Fm-wA',
    }
    # User-Agent identifies us as a normal browser; Referer is required or
    # the API answers 403 (anti-hotlink check).
    headers = {
        'Referer': 'https://chs.meituan.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.88 Safari/537.36'
    }
    response = requests.get(url=url, params=data, headers=headers)
    searchResult = response.json()['data']['searchResult']
    for index in searchResult:
        # Build the detail-page URL from the shop id and scrape the extra
        # fields (phone / opening hours / address) from it.
        href = f'https://www.meituan.com/xiuxianyule/{index["id"]}/'
        shop_info = get_shop_info(href)
        # Use .get() for the optional fields: the original index['...']
        # lookups raised KeyError and killed the whole crawl whenever a
        # single record was missing one attribute.
        dit = {
            '店名': index.get('title'),          # shop name
            '人均消费': index.get('avgprice'),    # average price per person
            '最低消费': index.get('lowestprice'), # minimum charge
            '商圈': index.get('areaname'),        # business district
            '店铺类型': index.get('backCateName'),# shop category
            '评分': index.get('avgscore'),        # rating
            '电话': shop_info[0],                 # phone
            '营业时间': shop_info[1],             # opening hours
            '地址': shop_info[2],                 # address
            '纬度': index.get('latitude'),        # latitude
            '经度': index.get('longitude'),       # longitude
            '详情页': href,                       # detail-page URL
        }
        csv_writer.writerow(dit)
        print(dit)

# Close the output file so buffered rows are flushed to disk; the original
# script never closed it.
f.close()
好了,我的这篇文章写到这里就结束啦!
有更多建议或问题可以评论区或私信我哦!一起加油努力叭(ง •_•)ง
喜欢就关注一下博主,或点赞收藏评论一下我的文章叭!!!
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。