# LDA topic-modeling demo using NLTK (tokenize/stopwords/stemming) and gensim.
import warnings
warnings.filterwarnings('ignore',category=UserWarning)
import nltk.tokenize as tk
import nltk.corpus as nc
import nltk.stem.snowball as sb
import gensim.models.ldamodel as gm
import gensim.corpora as gc
# Build an LDA (Latent Dirichlet Allocation) topic model over the documents
# in topic.txt: one document per line. Each line is tokenized, stopword-
# filtered, and stemmed, then converted to a bag-of-words corpus and fed to
# a 2-topic LDA model.
doc = []
with open('/Users/youkechaung/Desktop/算法/数据分析/AI/day02/day02/data/topic.txt', 'r') as f:
    for line in f:
        # Strip only the trailing newline; the original line[:-1] would also
        # delete the last character of a final line that lacks a newline.
        doc.append(line.rstrip('\n'))

# Regex tokenizer: keep runs of word characters, discarding punctuation.
tokenizer = tk.RegexpTokenizer(r'\w+')
# Stopwords ("the", "as", ...) are high-frequency but carry little topical
# signal. Use a set for O(1) membership tests inside the token loop.
stopwords = set(nc.stopwords.words('english'))
stemmer = sb.SnowballStemmer('english')

# One list of stemmed, stopword-free tokens per document.
lines_tokens = []
for line in doc:
    tokens = tokenizer.tokenize(line.lower())
    line_tokens = [stemmer.stem(token)
                   for token in tokens
                   if token not in stopwords]
    lines_tokens.append(line_tokens)

# Map each distinct token to an integer id, then express every document as
# a bag-of-words: a list of (token_id, count) pairs.
dic = gc.Dictionary(lines_tokens)
bow = [dic.doc2bow(line_tokens) for line_tokens in lines_tokens]
print(bow)

n_topics = 2
# Latent Dirichlet Allocation topic modeler; `passes` is the number of
# training sweeps over the corpus.
model = gm.LdaModel(bow, num_topics=n_topics, id2word=dic, passes=25)
# Show the top 4 words for each of the discovered topics.
topics = model.print_topics(num_topics=n_topics, num_words=4)
print(topics)
# End of script (scraped page footer removed).