Experiment code:
```python
# -*- coding: utf-8 -*-
import math
import functools

import numpy as np
import jieba
import jieba.posseg as psg
from gensim import corpora, models


# Load the stop word list (one word per line, UTF-8 encoded).
def get_stopword_list():
    stop_word_path = 'stop_words.utf8'
    with open(stop_word_path, encoding='utf-8') as f:
        stopword_list = [sw.strip() for sw in f]
    return stopword_list
```
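The script expects two data files alongside it: `stop_words.utf8` (the stop word list read above) and `corpus.txt` (the background corpus loaded further down, one document per line). Neither ships with the snippet; the following throwaway setup, with made-up contents, is just enough for a smoke test:

```python
# Hypothetical smoke-test data, not part of the original experiment:
# a tiny stop word list and a two-document background corpus.
stopwords = ['的', '了', '在', '是', '和', '对', '等', '从', '将', '于']
with open('stop_words.utf8', 'w', encoding='utf-8') as f:
    f.write('\n'.join(stopwords))

docs = [
    '各地博物馆陆续恢复对外开放,观众需提前预约并测量体温。',
    '国家文物局发布指导意见,推进文博单位有序恢复开放和复工。',
]
with open('corpus.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(docs))
```

A real run needs a proper stop word list and a much larger corpus; with only two documents the topic models below will be degenerate.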
```python
# Segmentation, optionally with part-of-speech (POS) tagging.
def seg_to_list(sentence, pos=False):
    if not pos:
        # Plain segmentation, no POS tags
        seg_list = jieba.cut(sentence)
    else:
        # Segmentation with POS tags attached
        seg_list = psg.cut(sentence)
    return seg_list


# Remove noise words: if pos is set, keep only nouns (POS flags starting
# with 'n'); in either case drop stop words and single-character words.
def word_filter(seg_list, pos=False):
    stopword_list = get_stopword_list()
    filter_list = []
    for seg in seg_list:
        if not pos:
            # No POS filtering: mark every word as a noun so all are kept
            word = seg
            flag = 'n'
        else:
            word = seg.word
            flag = seg.flag
        if not flag.startswith('n'):
            continue
        # Drop stop words and words shorter than 2 characters
        if word not in stopword_list and len(word) > 1:
            filter_list.append(word)
    return filter_list
```
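In isolation, these two helpers reduce a sentence to its noun candidates. Assuming the stop word file from the sketch above exists, a quick check (exact output depends on the jieba version and dictionary):

```python
words = word_filter(seg_to_list('博物馆恢复对外开放', pos=True), pos=True)
print(words)  # e.g. ['博物馆']: nouns of length >= 2, stop words removed
```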
```python
# Load the background corpus: one document per line in corpus.txt.
def load_data(pos=False, corpus_path='corpus.txt'):
    doc_list = []
    with open(corpus_path, 'r', encoding='utf-8') as f:
        for line in f:
            content = line.strip()
            seg_list = seg_to_list(content, pos)
            filter_list = word_filter(seg_list, pos)
            doc_list.append(filter_list)
    return doc_list


# Comparator used to rank the top-K keywords: sort by similarity score,
# breaking ties by comparing the two possible string concatenations so
# the ordering is deterministic.
def cmp(e1, e2):
    res = np.sign(e1[1] - e2[1])
    if res != 0:
        return res
    else:
        a = e1[0] + e2[0]
        b = e2[0] + e1[0]
        if a > b:
            return 1
        elif a == b:
            return 0
        else:
            return -1
```
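A quick check of the comparator with made-up (word, score) tuples; the two tied scores are ordered by the concatenation rule rather than arbitrarily:

```python
pairs = [('开放', 0.95), ('文物', 0.91), ('博物馆', 0.95)]
ranked = sorted(pairs, key=functools.cmp_to_key(cmp), reverse=True)
print(ranked)  # highest score first; the 0.95 tie is broken deterministically
```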
```python
# Topic-model wrapper around gensim's LSI/LDA implementations.
class TopicModel(object):
    def __init__(self, doc_list, keyword_num, model='LSI', num_topics=4):
        # Use the gensim interface to vectorize the corpus
        self.dictionary = corpora.Dictionary(doc_list)
        # Bag-of-words representation of each document
        corpus = [self.dictionary.doc2bow(doc) for doc in doc_list]
        # Re-weight each term by TF-IDF
        self.tfidf_model = models.TfidfModel(corpus)
        self.corpus_tfidf = self.tfidf_model[corpus]

        self.keyword_num = keyword_num
        self.num_topics = num_topics
        # Choose which topic model to train
        if model == 'LSI':
            self.model = self.train_lsi()
        else:
            self.model = self.train_lda()
        # Topic distribution for every word in the corpus
        word_dic = self.word_dictionary(doc_list)
        self.wordtopic_dic = self.get_wordtopic(word_dic)

    def train_lsi(self):
        lsi = models.LsiModel(self.corpus_tfidf, id2word=self.dictionary,
                              num_topics=self.num_topics)
        return lsi

    def train_lda(self):
        lda = models.LdaModel(self.corpus_tfidf, id2word=self.dictionary,
                              num_topics=self.num_topics)
        return lda

    def get_wordtopic(self, word_dic):
        wordtopic_dic = {}
        for word in word_dic:
            single_list = [word]
            wordcorpus = self.tfidf_model[self.dictionary.doc2bow(single_list)]
            wordtopic = self.model[wordcorpus]
            wordtopic_dic[word] = wordtopic
        return wordtopic_dic

    # Manual vocabulary construction: what one would do without the
    # gensim Dictionary interface.
    def word_dictionary(self, doc_list):
        dictionary = []
        for doc in doc_list:
            dictionary.extend(doc)

        dictionary = list(set(dictionary))

        return dictionary

    # Manual one-hot BOW vectorization (illustrative only; not called
    # by get_simword).
    def doc2bowvec(self, word_list):
        vec_list = [1 if word in word_list else 0
                    for word in self.dictionary.token2id]
        return vec_list

    # Compute the similarity between each word's topic distribution and
    # the document's topic distribution; the keyword_num most similar
    # words are taken as the keywords.
    def get_simword(self, word_list):
        sentcorpus = self.tfidf_model[self.dictionary.doc2bow(word_list)]
        senttopic = self.model[sentcorpus]

        # Cosine similarity between two topic vectors
        def calsim(l1, l2):
            a, b, c = 0.0, 0.0, 0.0
            for t1, t2 in zip(l1, l2):
                x1 = t1[1]
                x2 = t2[1]
                a += x1 * x2  # dot-product term
                b += x1 * x1
                c += x2 * x2
            sim = a / math.sqrt(b * c) if not (b * c) == 0.0 else 0.0
            return sim

        # Similarity between the input text and each candidate word
        sim_dic = {}
        for k, v in self.wordtopic_dic.items():
            if k not in word_list:
                continue
            sim = calsim(v, senttopic)
            sim_dic[k] = sim

        for k, v in sorted(sim_dic.items(), key=functools.cmp_to_key(cmp),
                           reverse=True)[:self.keyword_num]:
            print(k + '/ ', end='')
        print()
```
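The inner `calsim` is plain cosine similarity between the word's topic vector $x$ and the document's topic vector $y$:

$$\mathrm{sim}(x, y) = \frac{\sum_i x_i y_i}{\sqrt{\sum_i x_i^2}\,\sqrt{\sum_i y_i^2}}$$

Note that zipping the two gensim topic lists pairs entries by position. That is safe for LSI, which always returns all `num_topics` entries in order, but LDA omits topics below a probability threshold, so its vectors can be misaligned.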
```python
# Train a topic model on the background corpus, then extract keywords
# from the given candidate word list.
def topic_extract(word_list, model, pos=False, keyword_num=10):
    doc_list = load_data(pos)
    topic_model = TopicModel(doc_list, keyword_num, model=model)
    topic_model.get_simword(word_list)
```
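If the LDA misalignment noted above is a concern, gensim's `matutils.cossim` computes cosine similarity directly on sparse `(topic_id, weight)` lists and matches entries by id; a minimal sketch of a replacement for `calsim`:

```python
from gensim import matutils

def calsim_sparse(l1, l2):
    # Cosine similarity over sparse (topic_id, weight) vectors; entries
    # are matched by topic id instead of by list position.
    return matutils.cossim(l1, l2)
```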
```python
if __name__ == '__main__':
    # Test document: a Chinese news snippet about museums reopening
    # under COVID-19 epidemic-control measures.
    text = '''记者从国家文物局获悉,截至3月15日,19个省(区、市)180多家博物馆在做好疫情防控工
    作的前提下恢复对外开放,其中19家为一级博物馆。
    另外,沈阳故宫博物院、新四军江南指挥部纪念馆、金沙遗址博物馆等将于3月17日陆续恢复开放。
    随着疫情防控形势好转,各地博物馆、纪念馆等陆续恢复开放。记者从各恢复开放博物馆发布的公告获悉,
    各恢复开放博物馆对疫情防控期间参观观众在提前预约、测量体温等提出了明确要求,并提醒观众做好个人防护。
    2月27日,国家文物局发布《关于新冠肺炎疫情防控期间有序推进文博单位恢复开放和复工的指导意见》
    强调,有序恢复开放文物、博物馆单位,各文物、博物馆开放单位可采取网上实名预约、总量控制、分时
    分流、语音讲解、数字导览等措施,减少人员聚集。'''

    pos = True
    seg_list = seg_to_list(text, pos)
    # Candidate keywords: nouns of length >= 2 that are not stop words
    filter_list = word_filter(seg_list, pos)

    print('LSI model results:')
    topic_extract(filter_list, 'LSI', pos)
    print('LDA model results:')
    topic_extract(filter_list, 'LDA', pos)
```
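Each `topic_extract` call prints the `keyword_num` (here 10) highest-ranked words on a single line, separated by `/ `. LDA training is randomly initialized, so its keyword list can vary between runs and will generally differ from the LSI list.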
Experiment results: