赞
踩
这里是老师给的新闻数据集,里面有5个类别的新闻数据,我以cars这一类为例。
将csv格式的数据文件转换为txt格式(具体步骤参见“格式转换”一节)。
以下代码只是提示一下raw的具体内涵,完整代码最后附上。
# Read the GBK-encoded corpus into a one-column DataFrame. sep='aaa' is
# presumably a separator that never occurs in the text, so every physical
# line becomes one row of the single column 'txt' — TODO confirm against data.
raw = pd.read_csv("D:/download/文本挖掘/newsdata/cars_source.txt", names=['txt'], sep='aaa', encoding="GBK")
print(len(raw)) # how many lines the whole cars_source.txt document has
由于数据集内没有给定的词典,我就自己写了一个dictwords.txt(没有涵盖所有的特定短语),将一些特定的词汇比如“道路运输”写入词典内,在分词后,“道路运输”将不会被分成“道路/运输”。
def dict_words(raw):
    """Segment every line of the corpus with jieba, using a custom user dictionary.

    Loading the user dictionary keeps domain phrases such as "道路运输" as a
    single token instead of being split into "道路/运输".

    Parameters
    ----------
    raw : pandas.DataFrame
        One document line per row, in the single column ``txt``.

    Side effect: prints the '/'-joined tokens of every line; returns None.
    """
    # Renamed from `dict` — never shadow the builtin.
    dict_path = 'D:/download/文本挖掘/newsdata/dictwords.txt'  # path of the custom dictionary
    jieba.load_userdict(dict_path)  # register the self-written dictionary
    # Segmentation is done line by line, not on the whole document at once.
    for line in raw.txt:
        print('/'.join(jieba.cut(line)))  # '/' separates word from word
def dele_words(raw):
    """Extract keywords from every line with stop words filtered out.

    Registers the stop-word file with ``jieba.analyse``, then flattens the
    per-line keyword lists into one big list.

    Parameters
    ----------
    raw : pandas.DataFrame
        One document line per row, in the single column ``txt``.

    Returns
    -------
    list of str
        All extracted keywords, in document order.
    """
    ana.set_stop_words('D:/download/文本挖掘/newsdata/stopwords.txt')  # register the stop-word file
    # One flat list: every keyword of every line, in order.
    keywords = [w for i in range(len(raw)) for w in ana.extract_tags(raw.txt[i])]
    return keywords
def cal_words(words_list):
    """Count word frequencies and print the 20 most frequent words.

    Parameters
    ----------
    words_list : list of str
        The flattened keyword list produced by ``dele_words``.

    Returns
    -------
    pandas.Series
        word -> count, sorted in descending order. Returning the series is
        new but backward compatible: the original returned None and callers
        ignored the result.
    """
    # value_counts() is the one-step idiom for groupby('word').size()
    # followed by a descending sort.
    freqlist = pd.Series(words_list, name='word').value_counts()
    print(freqlist[:20])  # the 20 most frequent words
    return freqlist
def pic_words(words_list):
    """Draw a word cloud of the tokens, shaped and colored by a background image.

    The cloud is displayed with matplotlib and also saved as a PNG file.

    Parameters
    ----------
    words_list : list of str
        The flattened keyword list to visualize.
    """
    # Full frequency table of the tokens (word -> count).
    fdist = FreqDist(words_list)
    # Background image; its background must be pure white.
    pic = imageio.imread('D:/pyhomework/0516/dig1/timg.jpg')
    # ImageColorGenerator colors each word according to the image's colors.
    pic_color = wordcloud.ImageColorGenerator(pic)
    cloudobj = wordcloud.WordCloud(
        font_path="simkai.ttf",  # must not be None, or CJK glyphs render as boxes
        height=1000,
        width=2000,
        mode="RGBA",
        background_color='white',
        mask=pic,
        color_func=pic_color,  # the imported image's dominant tone is blue-gray
    ).fit_words(fdist)  # draw based on frequencies
    plt.imshow(cloudobj)
    plt.axis("off")  # hide the axes
    plt.show()
    cloudobj.to_file("D:/pyhomework/0516/dig1/pic_final.png")  # save as a high-resolution image
"""Word-frequency analysis and word-cloud drawing for the 'cars' news corpus."""
import pandas as pd
import wordcloud
import matplotlib.pyplot as plt  # plotting
import jieba
import jieba.analyse as ana
import imageio
from nltk import FreqDist


def dict_words(raw):
    """Segment every line with jieba after loading the custom user dictionary."""
    # Renamed from `dict` — never shadow the builtin.
    dict_path = 'D:/download/文本挖掘/newsdata/dictwords.txt'
    jieba.load_userdict(dict_path)  # register the self-written dictionary
    for line in raw.txt:  # segment line by line, not the whole document at once
        print('/'.join(jieba.cut(line)))


def dele_words(raw):
    """Extract keywords per line with stop words removed; return one flat list."""
    ana.set_stop_words('D:/download/文本挖掘/newsdata/stopwords.txt')
    words_list = []
    for line in raw.txt:
        words_list.extend(ana.extract_tags(line))  # keywords of one line
    return words_list


def cal_words(words_list):
    """Count word frequencies and print the 20 most frequent words."""
    # value_counts() == groupby('word').size() sorted descending, in one step.
    freqlist = pd.Series(words_list, name='word').value_counts()
    print(freqlist[:20])


def pic_words(words_list):
    """Draw and save a word cloud shaped/colored by a background image."""
    fdist = FreqDist(words_list)  # complete word -> frequency table
    pic = imageio.imread('D:/pyhomework/0516/dig1/timg.jpg')  # background must be pure white
    pic_color = wordcloud.ImageColorGenerator(pic)  # take word colors from the image
    cloudobj = wordcloud.WordCloud(
        font_path="simkai.ttf",  # must not be None, or CJK glyphs render as boxes
        height=1000,
        width=2000,
        mode="RGBA",
        background_color='white',
        mask=pic,
        color_func=pic_color,  # the image's dominant tone is blue-gray
    ).fit_words(fdist)  # draw based on frequencies
    plt.imshow(cloudobj)
    plt.axis("off")
    plt.show()
    cloudobj.to_file("D:/pyhomework/0516/dig1/pic_final.png")


if __name__ == '__main__':
    # sep='aaa' presumably never occurs in the text, so each physical line
    # becomes one row of the single column 'txt'.
    raw = pd.read_csv("D:/download/文本挖掘/newsdata/cars_source.txt",
                      names=['txt'], sep='aaa', encoding="GBK")
    print(len(raw))  # number of lines in cars_source.txt
    dict_words(raw)
    words_list = dele_words(raw)
    cal_words(words_list)
    pic_words(words_list)
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。