Helping with homework again: a simple knowledge graph demo implemented in Python. Many thanks to the original article; this post reproduces it and records the problems I ran into along the way.
Development tool: Jupyter Notebook
Environment:
Python 3.7
en_core_web_sm 2.3.0
spacy 2.2.5
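
Before running the notebook, it may be worth a quick sanity check that spaCy and the English model are installed at the versions listed above. A minimal sketch (the expected version strings are simply the ones from the list):

import spacy
print(spacy.__version__)            # expect 2.2.5
nlp = spacy.load("en_core_web_sm")  # raises OSError if the model has not been downloaded
print(nlp.meta["version"])          # expect 2.3.0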
The dataset is provided in the original article; download it yourself.
# coding: utf-8

# In[11]:
import re
import pandas as pd
import bs4
import requests
import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_sm')

from spacy.matcher import Matcher
from spacy.tokens import Span

import networkx as nx
import matplotlib.pyplot as plt
from tqdm import tqdm

pd.set_option('display.max_colwidth', 200)
get_ipython().run_line_magic('matplotlib', 'inline')

# In[12]:
candidate_sentences = pd.read_csv("wiki_sentences_v2.csv")
candidate_sentences.shape

# In[13]:
candidate_sentences['sentence'].sample(5)

# In[14]:
doc = nlp("the drawdown process is governed by astm standard d823")
for tok in doc:
    print(tok.text, "...", tok.dep_)

# In[16]:
def get_entities(sent):
    ## chunk 1
    # In this chunk I define a few empty variables. prv_tok_dep and prv_tok_text will hold the dependency tag
    # of the previous token in the sentence and the previous token itself, respectively.
    # prefix and modifier will hold text that is associated with the subject or the object.
    ent1 = ""
    ent2 = ""

    prv_tok_dep = ""    # dependency tag of previous token in the sentence
    prv_tok_text = ""   # previous token in the sentence

    prefix = ""
    modifier = ""

    #############################################################
    for tok in nlp(sent):
        ## chunk 2
        # Next, we loop through the tokens in the sentence. We first check whether the token is a punctuation
        # mark; if it is, we skip it and move on to the next token. If the token is part of a compound word
        # (dependency tag = compound), we store it in the prefix variable. A compound word is a group of words
        # that together form a unit with a new meaning (e.g. "Football Stadium", "animal lover").
        # When we come across a subject or an object in the sentence, we prepend this prefix to it.
        # The same is done for modifiers, e.g. "nice shirt", "big house".

        # if token is a punctuation mark then move on to the next token
        if tok.dep_ != "punct":
            # check: token is a compound word or not
            if tok.dep_ == "compound":
                prefix = tok.text
                # if the previous word was also a 'compound' then add the current word to it
                if prv_tok_dep == "compound":
                    prefix = prv_tok_text + " " + tok.text

            # check: token is a modifier or not
            if tok.dep_.endswith("mod") == True:
                modifier = tok.text
                # if the previous word was also a 'compound' then add the current word to it
                if prv_tok_dep == "compound":
                    modifier = prv_tok_text + " " + tok.text

            ## chunk 3
            # Here, if the token is the subject, it is captured as the first entity in the ent1 variable.
            # The variables prefix, modifier, prv_tok_dep and prv_tok_text are then reset.
            # note: str.find() returns an index, so "== True" means "== 1"; this only works because
            # "subj" starts at index 1 in tags like nsubj/nsubjpass/csubj
            if tok.dep_.find("subj") == True:
                ent1 = modifier + " " + prefix + " " + tok.text
                prefix = ""
                modifier = ""
                prv_tok_dep = ""
                prv_tok_text = ""

            ## chunk 4
            # Here, if the token is the object, it is captured as the second entity in the ent2 variable.
            # same index trick as above, relying on "obj" sitting at index 1 in tags like dobj/pobj
            if tok.dep_.find("obj") == True:
                ent2 = modifier + " " + prefix + " " + tok.text

            ## chunk 5
            # Once the subject and object of the sentence have been captured,
            # we update the previous token and its dependency tag.
            prv_tok_dep = tok.dep_
            prv_tok_text = tok.text
    #############################################################

    return [ent1.strip(), ent2.strip()]

get_entities('the film had 200 patents')

# In[17]:
entity_pairs = []
for i in tqdm(candidate_sentences["sentence"]):
    entity_pairs.append(get_entities(i))

entity_pairs[10:20]

# In[19]:
def get_relation(sent):
    doc = nlp(sent)

    # Matcher class object
    matcher = Matcher(nlp.vocab)

    # define the pattern
    pattern = [{'DEP': 'ROOT'},
               {'DEP': 'prep', 'OP': "?"},
               {'DEP': 'agent', 'OP': "?"},
               {'POS': 'ADJ', 'OP': "?"}]

    matcher.add("matching_1", None, pattern)  # spaCy 2.x API; spaCy 3.x uses matcher.add("matching_1", [pattern])

    matches = matcher(doc)
    k = len(matches) - 1

    span = doc[matches[k][1]:matches[k][2]]
    return span.text

get_relation("John completed the task")

# In[20]:
relations = [get_relation(i) for i in tqdm(candidate_sentences['sentence'])]
pd.Series(relations).value_counts()[:50]

# In[21]:
# extract subject
source = [i[0] for i in entity_pairs]

# extract object
target = [i[1] for i in entity_pairs]

kg_df = pd.DataFrame({'source': source, 'target': target, 'edge': relations})

# In[22]:
# create a directed-graph from a dataframe
G = nx.from_pandas_edgelist(kg_df, "source", "target",
                            edge_attr=True, create_using=nx.MultiDiGraph())
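
Before plotting, it can help to spot-check a few of the extracted triples. A minimal sketch, assuming the kg_df dataframe built in the cells above:

# sample a few (source, edge, target) triples from the dataframe built above
print(kg_df.sample(5))

# look at the triples carrying the most frequent relation
top_edge = kg_df['edge'].value_counts().idxmax()
print(kg_df[kg_df['edge'] == top_edge].head())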
# In[23]:
plt.figure(figsize=(12, 12))

pos = nx.spring_layout(G)
nx.draw(G, with_labels=True, node_color='skyblue', edge_cmap=plt.cm.Blues, pos=pos)
plt.show()

# In[24]:
G = nx.from_pandas_edgelist(kg_df[kg_df['edge'] == "composed by"], "source", "target",
                            edge_attr=True, create_using=nx.MultiDiGraph())

plt.figure(figsize=(12, 12))
pos = nx.spring_layout(G, k=0.5)  # k regulates the distance between nodes
nx.draw(G, with_labels=True, node_color='skyblue', node_size=1500, edge_cmap=plt.cm.Blues, pos=pos)
plt.show()

# In[25]:
G = nx.from_pandas_edgelist(kg_df[kg_df['edge'] == "written by"], "source", "target",
                            edge_attr=True, create_using=nx.MultiDiGraph())

plt.figure(figsize=(12, 12))
pos = nx.spring_layout(G, k=0.5)
nx.draw(G, with_labels=True, node_color='skyblue', node_size=1500, edge_cmap=plt.cm.Blues, pos=pos)
plt.show()

# In[26]:
G = nx.from_pandas_edgelist(kg_df[kg_df['edge'] == "released in"], "source", "target",
                            edge_attr=True, create_using=nx.MultiDiGraph())

plt.figure(figsize=(12, 12))
pos = nx.spring_layout(G, k=0.5)
nx.draw(G, with_labels=True, node_color='skyblue', node_size=1500, edge_cmap=plt.cm.Blues, pos=pos)
plt.show()
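
One limitation of the plots above is that the arrows do not show which relation they carry. As a small extension (a sketch, not part of the original article), networkx's draw_networkx_edge_labels can print the 'edge' attribute on each arrow of a subgraph, reusing the same layout settings as above:

# re-draw the "composed by" subgraph with the relation text printed on each edge
sub_df = kg_df[kg_df['edge'] == "composed by"]
G = nx.from_pandas_edgelist(sub_df, "source", "target",
                            edge_attr=True, create_using=nx.MultiDiGraph())

plt.figure(figsize=(12, 12))
pos = nx.spring_layout(G, k=0.5)
nx.draw(G, with_labels=True, node_color='skyblue', node_size=1500, pos=pos)

edge_labels = {(u, v): d['edge'] for u, v, d in G.edges(data=True)}
nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)
plt.show()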