import pandas as pd


# Load the drug-relation training set and inspect the class balance.
train_data = pd.read_csv("df_train_drug")

label_counts = train_data["label"].value_counts()
print(label_counts)








import spacy
import networkx as nx


# Parse an example sentence with spaCy and measure the shortest syntactic
# (dependency) path between the two drug mentions.
nlp = spacy.load("en_core_web_sm")

doc = nlp("drug1 alone had no effect on tyrosine phosphorylation in T24 cells, but dose-dependently inhibits the effects of drug2 when both are added simultaneously.")

# Print (head, token, dependency-label) triples for inspection.
for token in doc:
    print((token.head.text, token.text, token.dep_))

# Treat the dependency tree as an undirected graph over lower-cased tokens
# so networkx can compute the shortest dependency path (SDP).
edges = [
    (token.lower_, child.lower_)
    for token in doc
    for child in token.children
]
graph = nx.Graph(edges)
entity1 = "drug1"
entity2 = "drug2"
# Fixed typo in the printed label: "lenth" -> "length".
print('shortest path length: ', nx.shortest_path_length(graph, source=entity1, target=entity2))
print('shortest path: ', nx.shortest_path(graph, source=entity1, target=entity2))





# Select rows by condition:
# keep only rows whose `label` is less than 4 (the positive relation classes).
train_data_pos = train_data[train_data.label < 4]  # equivalent to data5 = data[data.B > 6]

train_data_pos

# Persist the filtered subset as a new CSV file.
train_data_pos.to_csv("train_data_pos")

data_pos = pd.read_csv("train_data_pos")
data_pos





import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt


# Add a sentence-length column: the character count of each text entry.
# Vectorized Series.str.len() replaces the list(map(lambda x: len(x), ...))
# idiom — same values, and it tolerates missing entries (NaN) instead of crashing.
data_pos["sentence_length"] = data_pos["text"].str.len()

# # Plot the count distribution of sentence lengths.
# sns.countplot("sentence_length", data=data_pos)
# # Only the count (y-axis) matters here; hide the x ticks and inspect
# # the x-axis range with a dist plot instead.
# plt.xticks([])
# plt.show()



from transformers import AutoTokenizer


# Sub-word tokenizer for the biomedical RoBERTa checkpoint used downstream.
tokenizer = AutoTokenizer.from_pretrained("minhpqn/bio_roberta-base_pubmed")

# Spot-check one training sentence.
data_pos["text"].loc[1]

# Tokenize a sample sentence containing entity-marker tags and check that
# decoding round-trips the input.
result = tokenizer('Synergism was observed when <e13> drug1 </e13> was combined with <e20> drug2 </e20> against Bacillus subtilis and Klebsiella oxytoca.')

tokenizer.decode(result["input_ids"])

# Sub-word token count per sentence (includes the special tokens the
# tokenizer adds); used below to find overly long examples.
# Series.apply replaces the list(map(lambda ...)) idiom.
data_pos["word_num"] = data_pos["text"].apply(lambda x: len(tokenizer(x)["input_ids"]))

data_pos





# Examples whose sub-word count exceeds the intended max sequence length.
data_long = data_pos[data_pos.word_num > 120]
data_long

# Pull out one long sentence (row index 387) for closer inspection.
str1 = data_long.loc[387, "text"]
str1


def del_special_tokens(text):
    """Strip entity-marker tags (e.g. ``<e10>`` ... ``</e10>``) from *text*.

    Only the first occurrence of each tag is removed.  The character that
    follows each tag is removed along with it — the tags are assumed to be
    followed by a single space, as in the training data.

    Args:
        text: sentence possibly containing entity-marker tags.

    Returns:
        tuple: ``(cleaned_text, found_tags)``.  Opening tags are listed
        before closing tags — the downstream SDP re-tagging code relies on
        this ordering.
    """
    opening_tags = ["<e10>", "<e11>", "<e12>", "<e13>", "<e20>", "<e21>", "<e22>", "<e23>"]
    closing_tags = ["</e10>", "</e11>", "</e12>", "</e13>", "</e20>", "</e21>", "</e22>", "</e23>"]
    special_tokens = []
    for token in opening_tags + closing_tags:
        index = text.find(token)
        if index != -1:  # the tag is present in the sentence
            # len(token) + 1 also drops the space following the tag
            # (replaces the hard-coded offsets 6 and 7).
            text = text[:index] + text[index + len(token) + 1:]
            special_tokens.append(token)
    return text, special_tokens


# Strip the entity markers before dependency parsing; keep the markers
# themselves so they can be re-attached to the shortest path later.
cleaned = del_special_tokens(str1)
str1, special_toekns = cleaned

str1





import spacy
import networkx as nx


# Re-parse the cleaned long sentence and extract the shortest dependency
# path (SDP) between the two drug mentions.
nlp = spacy.load("en_core_web_sm")

doc = nlp(str1)

# Print (head, token, dependency-label) triples for inspection.
for token in doc:
    print((token.head.text, token.text, token.dep_))

# Undirected graph over lower-cased tokens from the dependency tree.
edges = [
    (token.lower_, child.lower_)
    for token in doc
    for child in token.children
]
graph = nx.Graph(edges)
entity1 = "drug1"
entity2 = "drug2"
# Fixed typo in the printed label: "lenth" -> "length".
print('shortest path length: ', nx.shortest_path_length(graph, source=entity1, target=entity2))
print('shortest path: ', nx.shortest_path(graph, source=entity1, target=entity2))

sdp = nx.shortest_path(graph, source=entity1, target=entity2)

sdp


# The tags found earlier, in the order returned by del_special_tokens:
# openings first, then closings — the index-based inserts below rely on it.
special_toekns

# Re-attach the entity markers around the two endpoints of the SDP.
# sdp currently looks like [drug1, ..., drug2]; after these four in-place
# mutations it becomes [open1, drug1, close1, ..., open2, drug2, close2].
# NOTE(review): assumes exactly four tags were found and that the two drug
# mentions sit at the ends of the path — confirm for other sentences.
sdp.insert(0, special_toekns[0])  # open1 in front of drug1
sdp.insert(-1, special_toekns[1])  # open2 just before drug2 (last element)
sdp.insert(2, special_toekns[2])  # close1 right after drug1
sdp.append(special_toekns[3])  # close2 after drug2

sdp

# Collapse the tagged path back into a whitespace-joined string.
str2 = " ".join(sdp)

str2



