from tokenizer_tools import make_single_dataset
import os, torch
from torch_geometric.data import Data, InMemoryDataset
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from utils.tokenizer_tools import Tokenizer


def make_predict_data(data):
    """Build a torch_geometric ``Data`` graph from tokenized word records.

    Parameters
    ----------
    data : iterable of dict
        Each record must carry ``session_id``, ``word_id``,
        ``sentence_type`` (used as the label) and ``word_type``
        (used as the category).

    Returns
    -------
    Data
        Graph whose node features are the unique raw word ids of
        session 0 (shape ``[num_nodes, 1]``, dtype long) and whose
        edges chain consecutive words in reading order.
    """
    records = [{"session_id": r["session_id"], "word_id": r["word_id"],
                "label": r["sentence_type"], "category": r["word_type"]}
               for r in data]
    # Create a DataFrame with explicit column names.
    group = pd.DataFrame(records, columns=["session_id", "word_id", "label", "category"])
    group = group.reset_index(drop=True)
    # Dense 0..n-1 codes over the sorted unique word ids — identical to
    # sklearn's LabelEncoder output, but without the sklearn dependency.
    group["word_local_index"] = group.word_id.astype("category").cat.codes
    # Node features: one row per unique word, ordered by local index so
    # that row i is the feature of node i.
    # NOTE(review): session_id == 0 is hard-coded — assumes predict input
    # is a single session; confirm against Tokenizer.predict_words.
    node_features = (group.loc[group.session_id == 0, ["word_local_index", "word_id"]]
                     .sort_values("word_local_index")
                     .word_id.drop_duplicates().values)
    x = torch.LongTensor(node_features).unsqueeze(1)
    # Edges connect each word to its successor in the token sequence.
    source_nodes = group.word_local_index.values[:-1]
    target_nodes = group.word_local_index.values[1:]
    edge_index = torch.tensor([source_nodes, target_nodes], dtype=torch.long)
    y = torch.FloatTensor([1])  # dummy label; unused at predict time
    return Data(x=x, edge_index=edge_index, y=y)


# Directory holding the trained GNN checkpoints.
MODEL_PATH = "D:/code/models/gnn/"
# Training epoch of the checkpoint to load (was a bare magic number).
CHECKPOINT_EPOCH = 75
# NOTE(review): torch.load unpickles arbitrary objects — only ever point
# MODEL_PATH at trusted checkpoint files.
model = torch.load(os.path.join(MODEL_PATH, "Net_{}.pth".format(CHECKPOINT_EPOCH)))
# Fall back to CPU when no GPU is present instead of crashing on .to("cuda").
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)


def predict(text):
    """Run the loaded GNN on *text* and return the raw model output.

    Parameters
    ----------
    text : str
        Sentence to classify.

    Returns
    -------
    torch.Tensor
        Whatever the model's forward pass returns (logits/scores).
    """
    words = Tokenizer.predict_words(text)
    data = make_predict_data(words)
    # Single graph → every node belongs to batch 0.
    data.batch = torch.zeros(len(data.x), dtype=torch.long)
    model.eval()
    # Send the input to wherever the model actually lives instead of
    # hard-coding "cuda" (works on CPU-only machines too).
    device = next(model.parameters()).device
    data = data.to(device)
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        output = model(data)  # fixed typo: was "ouput"
    return output


# Guard the demo so importing this module does not trigger inference.
if __name__ == "__main__":
    # Smoke-test the pipeline on a sample sentence.
    text = "Students should learn to look up new words in dictionaries."
    output = predict(text)
    print(output)
