Hi everyone, I'm EverdayForCode. Have you studied today?
I've been too lazy: the camp wrapped up over a week ago and I still haven't finished these notes. So much for self-discipline! Five days off for May Day, played way too hard, and I didn't even sit the exam. Am I doomed?
Step 1: Scrape the comment data for iQiyi's Youth With You 2 (《青春有你2》) (reference link: https://www.iqiyi.com/v_19ryfkiv8w.html#curid=15068699100_9f9bab7e0d1e30c494622af777f4ba39)
Step 2: Word frequency statistics and visualization
Step 3: Draw a word cloud
Step 4: Use PaddleHub to run content moderation on the comments
Development environment:
AI Studio training platform
!pip install jieba
!pip install wordcloud
# Default font directory on Linux
!ls /usr/share/fonts/
# List the .ttf Chinese fonts available on the system
!fc-list :lang=zh | grep ".ttf"
!wget https://mydueros.cdn.bcebos.com/font/simhei.ttf # download a Chinese font (SimHei)
# Create the font directory .fonts
!mkdir .fonts
# Copy the font file into it
!cp simhei.ttf .fonts/
# Install the moderation model
!hub install porn_detection_lstm==1.1.0
!pip install --upgrade paddlehub
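Before going further, it's worth a quick sanity check that the packages import and the font landed where WordCloud can find it. A minimal sketch; the .fonts path assumes the AI Studio home directory, and the __version__ attributes are assumed to be present in these releases:

import os
import jieba, wordcloud, paddlehub
print(jieba.__version__, wordcloud.__version__, paddlehub.__version__)
print(os.path.exists('.fonts/simhei.ttf'))   # should print True after the cp above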
# Import required libraries
from __future__ import print_function
import requests
import json
import re                                  # regular expressions
import time                                # time utilities
import jieba                               # Chinese word segmentation
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from PIL import Image
from wordcloud import WordCloud            # word cloud rendering
import paddlehub as hub
# Request the iQiyi comment API and return the response
def getMovieinfo(url):
    '''
    Request the iQiyi comment API and return the response
    Param url: comment API url
    :return: response text, or None on failure
    '''
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0'
    }
    session = requests.Session()
    response = session.get(url, headers=headers)
    if response.status_code == 200:
        return response.text
    return None

# Parse the JSON response and collect the comments
def saveMovieInfoToFile(lastId, arr):
    '''
    Fetch one page of comments and append them to arr
    Param lastId: pagination cursor for the API
    Param arr: list that accumulates comment texts
    return: the lastId to use for the next page
    '''
    url = 'https://sns-comment.iqiyi.com/v3/comment/get_comments.action?agent_type=118&agent_version=9.11.5&authcookie=null&business_type=17&content_id=15068699100&page=&page_size=10&types=time&last_id='
    url += str(lastId)
    responseTxt = getMovieinfo(url)
    responseJson = json.loads(responseTxt)
    comments = responseJson['data']['comments']
    for val in comments:
        if 'content' in val.keys():
            arr.append(val['content'])
        lastId = str(val['id'])
    return lastId
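As a quick smoke test of the two functions above (assuming the comment API is still reachable and returns its usual JSON shape), a single page can be fetched like this:

# Hypothetical smoke test: fetch one page (10 comments)
comments = []
next_id = saveMovieInfoToFile('0', comments)
print(len(comments), 'comments fetched, next last_id =', next_id)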
# Remove special characters from the text
def clear_special_char(content):
    '''
    Strip special characters with a regex
    Param content: original text
    return: cleaned text
    '''
    # Keep only ASCII letters, digits, and CJK characters
    # (the original class [^A-Z^a-z^0-9^\u4e00-\u9fa5] also let literal '^' through)
    comp = re.compile('[^A-Za-z0-9\u4e00-\u9fa5]')
    return comp.sub('', content)
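With that character class, everything except ASCII letters, digits, and CJK characters is stripped, so punctuation and emoji disappear. For example:

print(clear_special_char('小姐姐好美呀~!!!666'))   # -> 小姐姐好美呀666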
def fenci(text):
    '''
    Segment text with jieba
    Param text: the sentence or document to segment
    return: list of tokens
    '''
    # Load the custom user dictionary (contestant names, fan slang, etc.)
    jieba.load_userdict('/home/aistudio/MyData/add_words.txt')
    seg = jieba.lcut(text, cut_all=False)
    return seg
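A quick call shows the effect; the exact split depends on what is in add_words.txt (adding the contestants' names there keeps jieba from cutting them apart):

# Example (output depends on the user dictionary)
print(fenci('虞书欣太可爱了'))   # e.g. ['虞书欣', '太', '可爱', '了']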
def stopwordslist(file_path):
    '''
    Build the stopword list
    Param file_path: path to the stopword file
    return: list of stopwords
    '''
    stopwords = [line.strip() for line in open(file_path, encoding='utf8').readlines()]
    return stopwords
def movestopwords(sentence, stopwords, counts):
    '''
    Remove stopwords and accumulate word frequencies
    Param sentence: list of segmented words
    Param stopwords: stopword list
    Param counts: dict accumulating the word counts
    return: None
    '''
    for word in sentence:
        if word in stopwords:
            continue
        # Merge partial segmentations of contestant names into one token.
        # This now runs before the single-character filter, otherwise the
        # one-character token '孔' could never be mapped to '孔雪儿'.
        if word in ('虞书', '欣虞书', '欣书'):
            word = '虞书欣'
        elif word in ('喻言冲', '投喻言', '言喻'):
            word = '喻言'
        elif word in ('谢可寅谢', '可寅谢', '可寅'):
            word = '谢可寅'
        elif word in ('孔', '雪儿'):
            word = '孔雪儿'
        if len(word) != 1:   # drop remaining single-character tokens
            counts[word] = counts.get(word, 0) + 1
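A toy run of movestopwords, assuming '的' appears in the stopword list; counts is mutated in place and single-character tokens are dropped:

counts = {}
movestopwords(['虞书', '加油', '的', '呀'], ['的'], counts)
print(counts)   # {'虞书欣': 1, '加油': 1}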
def drawcounts(counts, num):
    '''
    Plot a bar chart of the top-N word frequencies
    Param counts: word frequency dict
    Param num: how many top words to plot
    return: None
    '''
    x_axis = []
    y_axis = []
    c_order = sorted(counts.items(), key=lambda x: x[1], reverse=True)
    for c in c_order[:num]:
        x_axis.append(c[0])
        y_axis.append(c[1])
    matplotlib.rcParams['font.sans-serif'] = ['SimHei']   # use the SimHei font installed earlier
    matplotlib.rcParams['axes.unicode_minus'] = False
    plt.bar(x_axis, y_axis)
    plt.title('词频统计结果')
    plt.show()
def drawcloud(word_f):
    '''
    Draw a word cloud from the word frequencies
    Param word_f: word frequency dict
    return: None
    '''
    cloud_mask = np.array(Image.open('/home/aistudio/MyData/brk1_w.jpg'))   # mask image for the cloud shape
    st = set(['东西', '这是'])   # extra stopwords applied by WordCloud itself
    wc = WordCloud(background_color='white',
                   mask=cloud_mask,
                   max_words=150,
                   font_path='simhei.ttf',
                   min_font_size=10,
                   max_font_size=100,
                   width=400,
                   relative_scaling=0.3,
                   stopwords=st)
    wc.fit_words(word_f)
    wc.to_file('/home/aistudio/MyData/pic.png')
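If the mask image brk1_w.jpg is missing, WordCloud also runs without a mask; a minimal fallback sketch (word_f being the same frequency dict passed above, the output filename is arbitrary):

wc = WordCloud(background_color='white', max_words=150,
               font_path='simhei.ttf', width=400, height=300)
wc.fit_words(word_f)   # word_f: the frequency dict, as in drawcloud above
wc.to_file('pic_nomask.png')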
def text_detection(test_text, file_path):
    '''
    Run PaddleHub content moderation over the comments
    Param test_text: list that accumulates the comment lines
    Param file_path: path to the comment file
    return: None (prints flagged comments and their probabilities)
    '''
    porn_detection_lstm = hub.Module(name='porn_detection_lstm')
    f = open(file_path, 'r', encoding='utf8')   # was hard-coded to aqy.txt; use the parameter
    for line in f:
        if len(line.strip()) == 1:   # skip single-character comments
            continue
        else:
            test_text.append(line)
    f.close()
    input_dict = {'text': test_text}
    results = porn_detection_lstm.detection(data=input_dict, use_gpu=False, batch_size=1)
    for index, item in enumerate(results):
        if item['porn_detection_key'] == 'porn':
            print(item['text'], ':', item['porn_probs'])
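The easiest way to confirm the result schema is to print one item; a small sketch with a fresh module handle (the sample sentence is arbitrary, and the keys beyond those used above are worth verifying on your version):

m = hub.Module(name='porn_detection_lstm')
sample = m.detection(data={'text': ['这也太好看了吧']}, use_gpu=False, batch_size=1)
print(sample[0])   # expect keys such as 'text', 'porn_detection_key', 'porn_probs'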
'''
The comments are paginated, so the iQiyi API has to be requested repeatedly to
collect multiple pages; some comments contain emoji and other special characters.
num is the number of pages, 10 comments per page: to scrape 1000 comments, set num = 100.
'''
if __name__ == "__main__":
    num = 105
    lastId = '0'   # pagination id for the API
    arr = []       # all scraped comments
    with open('/home/aistudio/MyData/aqy.txt', 'a', encoding='utf8') as f:   # append mode
        for i in range(num):
            lastId = saveMovieInfoToFile(lastId, arr)
            time.sleep(0.5)   # hitting the API too frequently can fail, so sleep 0.5s
        for item in arr:
            Item = clear_special_char(item)
            if Item.strip() != '':
                try:
                    f.write(Item + '\n')
                except Exception as e:
                    print('comment contains unwritable characters')
    print('\n**********Total comments scraped***********:', len(arr))

    f = open('/home/aistudio/MyData/aqy.txt', 'r', encoding='utf8')
    counts = {}
    stopwords = stopwordslist('/home/aistudio/MyData/cn_stopwords.txt')   # load the stopword list once, not per line
    for line in f:
        words = fenci(line)
        movestopwords(words, stopwords, counts)
    drawcounts(counts, 10)   # plot the top-10 high-frequency words
    drawcloud(counts)        # draw the word cloud
    f.close()

    # Use PaddleHub to analyze the comment content
    file_path = '/home/aistudio/MyData/aqy.txt'
    test_text = []
    text_detection(test_text, file_path)
Keep at it, you salted fish!