import spacy
import networkx as nx

# Load the small English pipeline (tagger + dependency parser).
nlp = spacy.load("en_core_web_sm")

# Demo: shortest dependency path between two words of one sentence.
# text = u'Convulsions that occur after DTaP are caused by a fever.'
text = u'People have been moving back into downtown.'
# text = u'The child was carefully wrapped and bound into the cradle by means of a cord.'

# Graph nodes are lower-cased token texts (see the loop below), so the
# query entities must be lower-cased to match.
entity1 = 'People'.lower()
entity2 = 'downtown'

doc = nlp(text)

print('sentence:', format(doc))
# Load spaCy's dependency tree into an undirected networkx graph:
# one edge per head->child dependency arc. token.lower_ is already a
# str, so no extra format() wrapping is needed.
edges = []
for token in doc:
    for child in token.children:
        edges.append((token.lower_, child.lower_))
graph = nx.Graph(edges)

# Get the length and path between the two entities.
print('shortest path length: ', nx.shortest_path_length(graph, source=entity1, target=entity2))
print('shortest path: ', nx.shortest_path(graph, source=entity1, target=entity2))


# Second demo: a SemEval-style sentence in which the two entities are
# marked with XML-like <e1>...</e1> and <e2>...</e2> tags.
text = u"The <e1>cultivation</e1> consisted of plowing the crop with a double-shovel <e2>plow</e2>."


# NOTE(review): spacy and networkx are re-imported here (already imported
# at the top of the file), and json / word_tokenize appear unused in the
# visible code — presumably notebook-cell residue; confirm before removing.
import json
import re
import spacy
import networkx as nx
from nltk.tokenize import word_tokenize  

# Pull the entity surface forms out of the <e1>/<e2> tags.
# Non-greedy (.*?) so that a sentence containing more than one tag pair
# cannot over-match all the way to the last closing tag; [0] takes the
# first (and normally only) match.
e1 = re.findall(r'<e1>(.*?)</e1>', text)[0]
e2 = re.findall(r'<e2>(.*?)</e2>', text)[0]

# Strip all four markup tags (<e1>, </e1>, <e2>, </e2>) in one pass.
text = re.sub(r'</?e[12]>', '', text)

# Replace only the FIRST occurrence of each entity string with a stable
# placeholder so the graph nodes built later are unambiguous.
text = text.replace(e1, 'entity1', 1)
text = text.replace(e2, 'entity2', 1)


# Parse the tag-stripped sentence and rebuild the dependency graph,
# this time finding the path between the two placeholder tokens.
doc = nlp(text)

print('sentence:', format(doc))
# Load spaCy's dependency tree into an undirected networkx graph,
# one edge per head->child dependency arc.
edges = []
for token in doc:
    for child in token.children:
        edges.append((token.lower_, child.lower_))
graph = nx.Graph(edges)

# Get the length and path between the placeholders.
print('shortest path length: ', nx.shortest_path_length(graph, source='entity1', target='entity2'))
print('shortest path: ', nx.shortest_path(graph, source='entity1', target='entity2'))
# NOTE(review): this overwrites the sentence with the path (a list of
# token strings). Kept for compatibility with any downstream use of
# `text`, but a distinct name (e.g. `path`) would be clearer.
text = nx.shortest_path(graph, source='entity1', target='entity2')



