In today's era of big data, the real-time exchange of information on the internet generates a huge variety of text, and one of the most common forms is the user comment. Before such text can be analyzed, it usually needs preprocessing to extract the key information, namely word segmentation and stopword removal. The full code is as follows.
import os
import jieba

# Build the stopword list from a text file (one stopword per line)
def stopwordslist(filepath):
    stopwords = [line.strip() for line in open(filepath, 'r', encoding="utf-8").readlines()]
    return stopwords

# Segment a sentence with jieba and drop stopwords; returns a space-separated string
def seg_sentence(sentence):
    sentence_seged = jieba.cut(sentence.strip())
    stopwords = stopwordslist('C:/Users/JayDen/Desktop/stopwords.txt')  # path to the stopword file
    outstr = ''
    for word in sentence_seged:
        if word not in stopwords:
            if word != '\t':
                outstr += word
                outstr += " "
    return outstr

filePath = 'C:/Users/JayDen/Desktop/预处理评论数据/'
filelist = os.listdir(filePath)
for i in filelist:
    inputs = open('C:/Users/JayDen/Desktop/预处理评论数据/' + i, 'r', encoding="utf-8")  # file to be processed
    outputs = open('C:/Users/JayDen/Desktop/去停用词及分词操作后数据/(分词及去停用词)' + i, 'w', encoding="utf-8")  # processed output file
    for line in inputs:
        line_seg = seg_sentence(line)  # the return value is a string
        outputs.write(line_seg)
    outputs.close()
    inputs.close()
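As a quick sanity check of the segmentation-plus-stopword step, the sketch below applies the same filtering logic to a single made-up comment with a small in-memory stopword set; both the sample text and the stopword set are illustrative assumptions, not part of the original data or files.

import jieba

# Hypothetical sample comment and a tiny in-memory stopword set (for illustration only)
sample_comment = "这个手机的屏幕非常清晰，但是电池不太耐用。"
sample_stopwords = {"的", "非常", "但是", "不太"}

tokens = [w for w in jieba.cut(sample_comment.strip())
          if w not in sample_stopwords and w != '\t']
print(" ".join(tokens))  # prints the remaining tokens separated by spaces

In the full script, this same filtering runs line by line over every file in the comment folder, so each output file contains one space-separated, stopword-free line per input line.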