import json
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import math
import torch.nn as nn
import tqdm

train_Data=r"C:\Users\27437\Desktop\医疗对话分析\dateset.txt"

# Load training sentences, one per line, with trailing newlines stripped.
with open(train_Data, 'r', encoding='utf-8') as f:
    train_list = [line.replace("\n", "") for line in f.readlines()]


# Build the vocabulary tables: word2index maps char -> id, index2word id -> char.
word2index = {'pad': 0}
# Fixed: the original initialized this as {'pad': 0}, inverting key and value;
# dict(x) below fills it as id -> char (index2word[num+1] = data).
index2word = {0: 'pad'}
dict_list = []
def dict(x):
    """Build the char-level vocabulary from the sentences in x.

    Mutates the module-level dict_list, word2index and index2word.
    NOTE(review): this function shadows the builtin `dict`; the name is
    kept because it is called as `dict(train_list)` below.
    """
    # Track already-seen chars in a set: the original tested membership
    # against dict_list (a list), making vocabulary building O(n^2).
    seen = set(dict_list)
    for sen in x:
        for ch in sen:
            if ch not in seen:
                seen.add(ch)
                dict_list.append(ch)
    # Ids start at 1; id 0 is reserved for the padding token.
    for num, data in enumerate(tqdm.tqdm(dict_list)):
        word2index[data] = num + 1
        index2word[num + 1] = data



def Token(x):
    """Convert each sentence to a list of vocabulary ids.

    Characters missing from word2index are silently skipped.
    """
    return [[word2index[ch] for ch in sen if ch in word2index] for sen in x]

def padding(data, max_len=200, pad_id=0):
    """Pad (or truncate) every token list in `data` in place to `max_len`.

    Args:
        data: list of token-id lists; mutated in place.
        max_len: target length (default 200, the original hard-coded value).
        pad_id: filler id (default 0, the 'pad' vocabulary id).
    Returns:
        The same `data` list, for call-chaining.
    """
    for seq in data:
        if len(seq) > max_len:
            # Truncate overlong sequences: ragged rows would make the later
            # torch.tensor(token) call fail.
            del seq[max_len:]
        else:
            seq.extend([pad_id] * (max_len - len(seq)))
    return data

# Positional encoding (fixed sinusoidal, sequence-first layout).
class PositionalEncoding(nn.Module):
    """Add sinusoidal position information to embeddings, then apply dropout.

    Expects input shaped (seq_len, batch, emb_dim); the buffer `pe` stores
    encodings for up to `max_len` positions.
    """

    def __init__(self, emb_dim, dropout, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        positions = torch.arange(0, max_len).unsqueeze(1)
        # Standard 1/10000^(2i/d) frequency schedule.
        inv_freq = torch.exp(torch.arange(0, emb_dim, 2) * -(math.log(10000.0) / emb_dim))
        angles = positions * inv_freq
        pe = torch.zeros(max_len, 1, emb_dim)
        pe[:, 0, 0::2] = torch.sin(angles)
        pe[:, 0, 1::2] = torch.cos(angles)
        # Buffer: moves with .to(device) / saves with state_dict, not trained.
        self.register_buffer('pe', pe)

    def forward(self, x):
        # Broadcast the first x.size(0) position rows across the batch dim.
        return self.dropout(x + self.pe[:x.size(0), :])

class TransformEncoder(nn.Module):
    """Character-level Transformer encoder for dialogue-act classification.

    Args:
        embedding_dim: embedding size (must be divisible by n_heads).
        dropout: dropout probability used throughout.
        output_dim: per-position size of the final linear projection.
        n_heads: number of self-attention heads.
        hid_dim: feed-forward hidden size inside each encoder layer.
        n_layers: number of stacked encoder layers.
        vocab_size: embedding-table size; defaults to 2550, the value
            previously hard-coded, so existing callers are unaffected.
    """

    def __init__(self, embedding_dim, dropout, output_dim, n_heads, hid_dim,
                 n_layers, vocab_size=2550):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.pos_encoding = PositionalEncoding(embedding_dim, dropout)
        encoder_layer = nn.TransformerEncoderLayer(
            embedding_dim, n_heads, hid_dim, dropout, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, n_layers)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(embedding_dim, output_dim)

    def forward(self, src):
        """src: (batch, seq_len) token ids -> (batch, seq_len, output_dim)."""
        embedded = self.embedding(src)
        # PositionalEncoding expects sequence-first input; transpose around it.
        embedded = self.pos_encoding(embedded.transpose(0, 1)).transpose(0, 1)
        outputs = self.transformer_encoder(embedded)
        # NOTE(review): fc is applied at every position and the caller then
        # flattens (seq_len * output_dim) into a single logit vector — confirm
        # this is the intended classification head.
        return self.fc(self.dropout(outputs))



# Model hyper-parameters.
EMB_DIM = 768
HID_DIM = 256
N_LAYERS = 6
N_HEADS = 12
DROPOUT = 0.2
LEARNING_RATE = 0.00001
# NOTE(review): OUTPUT_DIM is 768, not the 16 action classes; the training
# loop flattens (seq_len * OUTPUT_DIM) into one logit vector — confirm intended.
OUTPUT_DIM = 768
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = TransformEncoder(EMB_DIM, DROPOUT, OUTPUT_DIM, N_HEADS, HID_DIM, N_LAYERS)
model = model.to(device)
optimizer = optim.Adam(model.parameters(),lr=LEARNING_RATE)
# NOTE(review): ignore_index=0 skips targets equal to 0, but action id 0 is
# "Request-Symptom" (see action_dict below), so that class is never trained.
# 0 is the *vocabulary* pad id, not a pad label — verify this is intended.
criterion = nn.CrossEntropyLoss(ignore_index=0)
criterion=criterion.to(device)

if __name__ == '__main__':
    # All dialogue-act classes used by the task.
    actions = [
        "Request-Symptom",
        "Inform-Symptom",
        "Request-Etiology",
        "Inform-Etiology",
        "Request-Basic_Information",
        "Inform-Basic_Information",
        "Request-Existing_Examination_and_Treatment",
        "Inform-Existing_Examination_and_Treatment",
        "Request-Drug_Recommendation",
        "Inform-Drug_Recommendation",
        "Request-Medical_Advice",
        "Inform-Medical_Advice",
        "Request-Precautions",
        "Inform-Precautions",
        "Diagnose",
        "Other"
    ]

    # Forward (action -> id) and inverse (id -> action) lookup tables.
    action_dict = {act: idx for idx, act in enumerate(actions)}
    index2action = {idx: act for idx, act in enumerate(actions)}

    # Read one action label per line and convert it to its integer id.
    label = []
    with open("label.txt", "r", encoding='utf-8') as file:
        for raw in file.readlines():
            label.append(action_dict[raw.replace("\n", "")])

    # Build the char vocabulary from the training sentences, then tokenize
    # and pad every sentence to a fixed length of 200.
    dict(train_list)
    token = Token(train_list)
    token = padding(token)

    # Build the training dataset.
    class DATASET(Dataset):
        """Pairs each padded token sequence with its dialogue-act label id."""

        def __init__(self, data, label):
            # (n_samples, 200) token ids and (n_samples,) label ids.
            self.data = torch.tensor(data)
            self.label = torch.tensor(label)

        def __len__(self):
            return self.data.shape[0]

        def __getitem__(self, idx):
            return self.data[idx], self.label[idx]

    dataset = DATASET(token, label)
    # drop_last discards the final incomplete batch (22 leftover samples).
    dataloader = DataLoader(dataset, batch_size=64, shuffle=True, drop_last=True)

    for epoch in range(1000):
        running_loss = 0.0
        for batch in tqdm.tqdm(dataloader):
            # Reset parameter gradients.
            optimizer.zero_grad()
            src, tgt = batch
            src = src.to(device)
            tgt = tgt.to(device)
            # Forward pass, backward pass, optimizer step.
            output = model(src)
            # Use the actual batch size instead of the hard-coded 64 so the
            # reshape stays correct if batch_size/drop_last ever changes.
            # NOTE(review): this flattens (seq_len * OUTPUT_DIM) into one logit
            # vector per sample for CrossEntropyLoss — confirm intended.
            loss = criterion(output.view(src.size(0), -1), tgt)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        # Average per-batch loss; the original divided the batch-summed loss
        # by len(label)-3, an arbitrary denominator.
        print(f'Epoch {epoch + 1}, Loss: {running_loss / len(dataloader)}')
        # Checkpoint every 100 epochs (file named after the 0-based epoch).
        if (epoch + 1) % 100 == 0:
            torch.save(model.state_dict(), str(epoch) + '.pth')



    '''Dev-set evaluation.'''
    dev_data = "dev_dataset.txt"
    dev_label = "dev_label.txt"

    # One dev sentence per line, trailing newlines stripped.
    with open(dev_data, 'r', encoding='utf-8') as f:
        dev_list = [line.replace("\n", "") for line in f.readlines()]

    # Tokenize with the *training* vocabulary, then pad to length 200.
    dev_tokens = padding(Token(dev_list))

    # Dev labels mapped to their integer action ids.
    dev_labels = []
    with open(dev_label, "r", encoding='utf-8') as file:
        for raw in file.readlines():
            dev_labels.append(action_dict[raw.replace("\n", "")])

        # Build the dev dataset.


    class Dev_DATASET(Dataset):
        """Unlabelled dataset over the padded dev token sequences."""

        def __init__(self, data):
            # (n_samples, 200) token ids.
            self.data = torch.tensor(data)

        def __len__(self):
            return self.data.shape[0]

        def __getitem__(self, idx):
            return self.data[idx]

    dev_dataset = Dev_DATASET(dev_tokens)
    dev_dataloader = DataLoader(dev_dataset, batch_size=64)

    # Evaluate the checkpoint saved after epoch 600.
    model.load_state_dict(torch.load('599.pth'))
    model.eval()
    with torch.no_grad():
        predict_label = []
        for batch in tqdm.tqdm(dev_dataloader):
            batch = batch.to(device)
            output = model(batch)
            # Use the real batch size: this loader has no drop_last, so the
            # final batch is smaller than 64 and the original hard-coded
            # view(64, -1) silently reshaped it into 64 misaligned rows.
            output = output.view(batch.size(0), -1)
            predicted_index = torch.argmax(output, dim=1)
            predict_label.append(predicted_index.cpu().numpy().tolist())

        # Flatten the per-batch prediction lists.
        predict_label = [item for sublist in predict_label for item in sublist]
        correct = sum(1 for pre, true in zip(predict_label, dev_labels) if pre == true)
        # Accuracy over the labelled dev set (was hard-coded as 24839).
        print("准确率：" + str(correct / len(dev_labels)))
        print(predict_label[:200])
        print(dev_labels[:200])

    test_file = "test_dataset.txt"
    # One test sentence per line; tokenize and pad exactly like training data.
    with open(test_file, 'r', encoding='utf-8') as f:
        test_list = [line.replace("\n", "") for line in f.readlines()]
        test_tokens = padding(Token(test_list))
    class Test_DATASET(Dataset):
        """Unlabelled dataset over the padded test token sequences."""

        def __init__(self, data):
            # (n_samples, 200) token ids.
            self.data = torch.tensor(data)

        def __len__(self):
            return self.data.shape[0]

        def __getitem__(self, idx):
            return self.data[idx]

    test_dataset = Test_DATASET(test_tokens)
    test_dataloader = DataLoader(test_dataset, batch_size=64)

    model.load_state_dict(torch.load('599.pth'))
    model.eval()
    with torch.no_grad():
        predict_label = []
        for batch in tqdm.tqdm(test_dataloader):
            batch = batch.to(device)
            output = model(batch)
            # Real batch size instead of the hard-coded 64: the last batch of
            # a non-drop_last loader may be smaller, and view(64, -1) would
            # misalign its predictions.
            output = output.view(batch.size(0), -1)
            predicted_index = torch.argmax(output, dim=1)
            predict_label.append(predicted_index.cpu().numpy().tolist())

        # Flatten the per-batch prediction lists.
        predict_label = [item for sublist in predict_label for item in sublist]
        # Map raw argmax indices back to action names.
        # NOTE(review): indices outside 0..15 are silently dropped here, which
        # shifts every later prediction onto the wrong utterance — confirm that
        # this filtering is intended.
        predict_act = [index2action[int(i)] for i in predict_label if i in index2action]

        # Write the predicted dialogue acts back into the shared-task JSON.
        test_file = "IMCS-DAC/IMCS-DAC_test.json"
        with open(test_file, 'r', encoding='utf-8') as f:
            test_data = json.load(f)
            count = 0
            predict_act_len = len(predict_act)
            for dialogue in test_data.values():
                for turn in dialogue:
                    if count <= predict_act_len - 1:
                        turn["dialogue_act"] = predict_act[count]
                        count += 1
        with open(test_file, 'w', encoding='utf-8') as json_file:
            json.dump(test_data, json_file, ensure_ascii=False, indent=2)

























