《将夜》 (Ever Night) is a TV series adapted from the novel by 猫腻, starring 陈飞宇 and 宋伊人, and currently airing on Tencent Video. While learning Python web scraping, I wanted to crawl its Douban short comments for fun. After some effort, this beginner finally managed to scrape them. Because of Douban's restrictions, even when logged in you can only crawl about 500 comments. The full code is as follows:
```python
# Import required packages
import os
import re
import time

import pymongo
import requests
from bs4 import BeautifulSoup as bs
from PIL import Image  # needed to display the captcha image
from pyquery import PyQuery as pq

# MongoDB connection settings
MONGO_URL = 'localhost'
MONGO_DB = 'douban'
MONGO_COLLECTION = 'jiangye_comments'
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]


def save_to_mongo(result):
    """Save one comment record to MongoDB."""
    try:
        if db[MONGO_COLLECTION].insert(result):
            print('Saved to MongoDB')
    except Exception:
        print('Failed to save to MongoDB')


# A single Session keeps the login cookies for all later requests
session = requests.Session()


def loginin():
    """Log in to Douban, handling the captcha if one is shown."""
    url = 'https://www.douban.com/accounts/login'
    name = 'your_username'
    psw = 'your_password'
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:53.0) Gecko/20100101 Firefox/53.0",
        "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
        "Accept-Encoding": "gzip,deflate",
        "Connection": "keep-alive"
    }
    data = {
        'form_email': name,
        'form_password': psw,
        'source': 'index_nav',
        'remember': 'on'
    }
    # Check whether the login page contains a captcha
    captcha = session.get(url, headers=headers, timeout=30)
    soup = bs(captcha.content, 'lxml')
    img = soup.find_all('img', id='captcha_image')
    print(img)
    if img:
        captcha_url = re.findall('src="(.*?)"', str(img))[0]
        print("Captcha image URL:", captcha_url)
        capid = captcha_url.split('&')[0].split('=')[1]
        print(capid)
        # Download the captcha, show it locally and ask the user to type it in
        cap = session.get(captcha_url, headers=headers).content
        with open('captcha.jpg', 'wb') as f:
            f.write(cap)
        im = Image.open('captcha.jpg')
        im.show()
        capimg = input('Please enter the captcha: ')
        data.update({
            'captcha-solution': capimg,
            'captcha-id': capid
        })
        print(data)
        os.remove('captcha.jpg')
    else:
        print('No captcha required, logging in directly')
    r = session.post(url, data=data, headers=headers, timeout=30)
    print(r.status_code)


if __name__ == '__main__':
    loginin()
    # Crawl the comments: Douban serves at most ~500 short comments, 20 per page
    for i in range(0, 25):
        start = i * 20
        try:
            time.sleep(2)
            url = ("https://movie.douban.com/subject/26848645/comments?start=" +
                   str(start) + "&limit=20&sort=new_score&status=P")
            print("crawling: %s" % url)
            html = session.get(url).content
            doc = pq(html)
            items = doc('#comments .comment-item').items()
            for item in items:
                jiangye_comments = {
                    'author': item.find('.avatar a').attr('title'),
                    'votes': item.find('.votes').text(),
                    'rating': item.find('.rating').text(),
                    'date': item.find('.comment-time').text(),
                    'comments': item.find('.short').text()
                }
                save_to_mongo(jiangye_comments)
        except Exception:
            continue
```
Douban has anti-scraping measures and will restrict access after too many requests, so the script logs in first, obtains a requests Session, and then uses that Session to open the pages to be crawled. During login, Douban may demand a captcha if you have logged in too many times, so the script checks whether a captcha is present; if it is, the captcha image is saved locally and you are asked to type it into the shell, after which the login completes.
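Since repeated logins are exactly what triggers the captcha, it can also help to save the session cookies after the first successful login and reuse them on later runs. This is not part of the original script; a minimal sketch with `pickle` (the `cookies.pkl` filename is just an example) might look like this:

```python
import os
import pickle

import requests

session = requests.Session()
COOKIE_FILE = 'cookies.pkl'  # hypothetical filename, use whatever you like


def load_cookies():
    """Reuse cookies from a previous login if they were saved."""
    if os.path.exists(COOKIE_FILE):
        with open(COOKIE_FILE, 'rb') as f:
            session.cookies.update(pickle.load(f))
        return True
    return False


def save_cookies():
    """Call this once right after loginin() succeeds."""
    with open(COOKIE_FILE, 'wb') as f:
        pickle.dump(session.cookies, f)
```

In the main block you would then try `load_cookies()` first and only call `loginin()` (followed by `save_cookies()`) when no saved cookies exist.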
Since Douban only exposes about 500 comments, the crawl produces the following results:
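A quick way to confirm how many comments actually made it into the database is to count the documents in the collection. A minimal sketch, assuming the same MongoDB settings as above (`count_documents` needs pymongo 3.7 or newer):

```python
import pymongo

client = pymongo.MongoClient('localhost')
collection = client['douban']['jiangye_comments']

# Total number of stored comments (should be at most ~500)
print(collection.count_documents({}))

# Peek at one record to make sure the fields were captured
print(collection.find_one())
```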
## Drawing the Word Cloud
The short comments are pulled from the database, segmented with jieba, and filtered with a stopword list. The original image used as the word-cloud mask is:
The code for drawing the word cloud is as follows:
```python
# Import required packages
import os
import re
from os import path

import jieba
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymongo
from PIL import Image
from wordcloud import WordCloud, ImageColorGenerator

# Read the comments back from MongoDB into a DataFrame
MONGO_URL = 'localhost'
MONGO_DB = 'douban'
MONGO_COLLECTION = 'jiangye_comments'
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]
data = pd.DataFrame(list(db[MONGO_COLLECTION].find()))


def analysis(data):
    # Load a custom dictionary so jieba keeps names from the novel intact
    jieba.load_userdict("userdict.txt")
    text = ''
    symbol_to_replace = '[!"#$%&\'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\\]^_`{|}~]+'
    for i in data['comments'].values:
        # Strip punctuation, then segment the comment into words
        i = re.sub(symbol_to_replace, '', i)
        text += ' '.join(jieba.cut(i, cut_all=False)) + ' '  # trailing space keeps comments separated

    # Background image used as the word-cloud mask
    d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()
    background_Image = np.array(Image.open(path.join(d, "chen.jpg")))

    # Stopwords: run the frequency count on text first, sort it,
    # then decide which words to add here
    stopwords = set()
    stopwords.update(['如何', '怎么', '一个', '什么', '为什么', '还是', '我们', '为何',
                      '可能', '不是', '没有', '哪些', '成为', '可以', '背后', '到底',
                      '就是', '这么', '不要', '怎样', '为了', '能否', '你们', '还有',
                      '这样', '这个', '真的', '那些', '觉得', '虽然', '除了', '感觉',
                      '但是', '很多', '有点', '已经', '那么', '完全', '实在', '开始',
                      '其他', '自己'])

    wc = WordCloud(
        background_color='black',
        font_path="C:\\Windows\\Fonts\\simhei.ttf",  # a Chinese font is required
        mask=background_Image,
        stopwords=stopwords,
        max_words=2000,
        margin=2,
        max_font_size=100,
        random_state=42,
        scale=2
    )
    wc.generate_from_text(text)

    # Print the 50 most frequent words (after stopword filtering)
    process_word = wc.process_text(text)
    sort = sorted(process_word.items(), key=lambda e: e[1], reverse=True)
    print(sort[:50])

    # Recolor the cloud from the background image and save the figure
    img_colors = ImageColorGenerator(background_Image)
    wc.recolor(color_func=img_colors)
    plt.imshow(wc, interpolation='bilinear')
    plt.axis('off')
    plt.tight_layout()
    plt.savefig('jiangye.png', dpi=200)
    plt.show()


# Draw the word cloud
analysis(data)
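The comment inside `analysis()` mentions picking the stopwords by inspecting word frequencies first. One way to do that check is a separate small script; this is only a sketch using `collections.Counter` on the same MongoDB data, not part of the original code:

```python
from collections import Counter

import jieba
import pandas as pd
import pymongo

client = pymongo.MongoClient('localhost')
db = client['douban']
data = pd.DataFrame(list(db['jiangye_comments'].find()))

# Segment every comment and count how often each word appears,
# skipping single-character tokens
counter = Counter()
for comment in data['comments'].values:
    counter.update(w for w in jieba.cut(comment) if len(w) > 1)

# The most common words are the candidates for the stopword list
for word, freq in counter.most_common(50):
    print(word, freq)
```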
The final word cloud looks like this: