import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import numpy as np
import math
import torch.nn as nn
import pandas as pd
import tqdm

# Load the raw training corpus at import time: one training sentence per line.
# NOTE(review): "dateset.txt" looks like a typo for "dataset.txt" — confirm the
# actual file name on disk before renaming anything.
train_Data="dateset.txt"

with open(train_Data,'r',encoding='utf-8') as f:
    file=f.readlines()
    # Strip trailing newlines; each element is one raw sentence string.
    train_list = [i.replace("\n","") for i in file]


# Build the character-level vocabulary.
# Index 0 is reserved for the padding token.
word2index = {'pad': 0}
index2word = {0: 'pad'}  # fixed: maps index -> word (was {'pad': 0}, inverted)
dict_list = []


def dict(x):
    """Populate the global vocabulary tables from an iterable of sentences.

    Every distinct character in ``x`` is appended to ``dict_list`` in
    first-seen order, then assigned index ``position + 1`` (0 is reserved for
    padding) in ``word2index`` / ``index2word``.

    NOTE(review): the function name shadows the builtin ``dict``; kept
    unchanged because existing callers use it.
    """
    seen = set(dict_list)  # O(1) membership instead of O(n) list scans
    for sen in x:
        for ch in sen:
            if ch not in seen:
                seen.add(ch)
                dict_list.append(ch)
    for num, data in enumerate(tqdm.tqdm(dict_list)):
        word2index[data] = num + 1
        index2word[num + 1] = data



def Token(x, vocab=None):
    """Convert sentences to lists of token ids.

    Args:
        x: iterable of sentences (strings / character sequences).
        vocab: mapping char -> id; defaults to the module-level ``word2index``.
            Generalized so the tokenizer can be reused with any vocabulary.

    Returns:
        list[list[int]]: one id list per sentence; characters missing from
        the vocabulary are silently dropped (original behavior).
    """
    if vocab is None:
        vocab = word2index
    return [[vocab[ch] for ch in sen if ch in vocab] for sen in x]

def padding(data, max_len=200, pad_id=0):
    """Pad (and now also truncate) every sequence in-place to ``max_len`` ids.

    The original version only padded short sequences; a sequence longer than
    ``max_len`` was left ragged and would crash ``torch.tensor`` downstream,
    so over-long sequences are truncated here as well.

    Args:
        data: list of mutable id lists; mutated in place.
        max_len: target length (default 200, the original hard-coded value).
        pad_id: fill value for short sequences (0 = 'pad' token).

    Returns:
        The same (mutated) list, for call-chaining.
    """
    for seq in data:
        del seq[max_len:]  # truncate over-long sequences
        seq.extend([pad_id] * (max_len - len(seq)))
    return data



# Transformer hyper-parameters (BERT-base-like sizes).
d_model = 768  # embedding / hidden size
d_ff = 2048  # inner dimension of the position-wise feed-forward network
d_k = d_v = 64  # per-head dimension of K (= Q) and V
n_layers = 6  # number of stacked EncoderLayer blocks
n_heads = 12  # number of heads in Multi-Head Attention


class PositionalEncoding(nn.Module):
    """Add fixed sine/cosine positional encodings, then apply dropout.

    Expects input of shape [seq_len, batch_size, d_model].
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)  # even dims: sine
        table[:, 1::2] = torch.cos(positions * freqs)  # odd dims: cosine
        # Shape [max_len, 1, d_model] so it broadcasts over the batch axis.
        self.register_buffer('pe', table.unsqueeze(1))

    def forward(self, x):
        """x: [seq_len, batch_size, d_model] -> same shape with PE added."""
        return self.dropout(x + self.pe[:x.size(0), :])


# Mask used by attention to ignore padding positions (token id 0).
def get_attn_pad_mask(seq_q, seq_k):
    """Build a [batch, len_q, len_k] boolean mask, True where seq_k is padding.

    seq_q: [batch_size, len_q], seq_k: [batch_size, len_k] — id tensors.
    Only shapes of seq_q and values of seq_k matter.
    """
    batch_size, len_q = seq_q.size()
    _, len_k = seq_k.size()  # fixed: no longer re-binds batch_size
    # .data dropped (legacy autograd idiom); eq(0) marks pad positions.
    pad_attn_mask = seq_k.eq(0).unsqueeze(1)  # [batch, 1, len_k]
    return pad_attn_mask.expand(batch_size, len_q, len_k)


# Causal (subsequent-position) mask: position i may not attend to j > i.
def get_attn_subsequence_mask(seq):
    """Return a [batch, seq_len, seq_len] upper-triangular uint8 mask (1 = blocked).

    seq: [batch_size, seq_len]; only its shape is used.
    Rewritten in pure torch — the original round-tripped through numpy for the
    same result.
    """
    attn_shape = [seq.size(0), seq.size(1), seq.size(1)]
    # triu with diagonal=1 applies to the last two dims of the 3-D tensor.
    return torch.triu(torch.ones(attn_shape), diagonal=1).byte()


class ScaledDotProductAttention(nn.Module):
    """softmax(Q·Kᵀ / sqrt(d_k)) · V with masked positions forced to ~0 weight.

    Masked scores are filled with -1e9 (not 0) so they vanish after softmax,
    and scores are divided by sqrt(d_k) so the softmax does not saturate for
    large head dimensions (which would shrink gradients).
    """

    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()

    def forward(self, Q, K, V, attn_mask):
        """
        Q, K: [batch, n_heads, len, d_k]; V: [batch, n_heads, len, d_v].
        attn_mask: boolean/byte mask broadcastable to the score shape;
        True (1) marks positions to ignore.
        Returns (context [batch, n_heads, len_q, d_v], attention weights).
        """
        # Scale by the actual head dimension instead of the module-level d_k
        # global — removes a hidden coupling and the numpy scalar-sqrt call.
        scores = torch.matmul(Q, K.transpose(-1, -2)) / math.sqrt(Q.size(-1))
        scores.masked_fill_(attn_mask, -1e9)  # masked positions -> ~0 after softmax
        attn = nn.Softmax(dim=-1)(scores)
        context = torch.matmul(attn, V)  # [batch, n_heads, len_q, d_v]
        return context, attn


class MultiHeadAttention(nn.Module):
    """Multi-head attention with residual connection and LayerNorm.

    Fix: the original built ``nn.LayerNorm(d_model).cuda()`` inside forward(),
    creating a fresh, never-trained norm on every call and hard-requiring
    CUDA. The norm is now a registered submodule, so its affine parameters
    train with the model and it follows the module's device.
    """

    def __init__(self):
        super(MultiHeadAttention, self).__init__()
        self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=False)
        self.W_K = nn.Linear(d_model, d_k * n_heads, bias=False)
        self.W_V = nn.Linear(d_model, d_v * n_heads, bias=False)
        self.fc = nn.Linear(n_heads * d_v, d_model, bias=False)
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, input_Q, input_K, input_V, attn_mask):
        """
        input_Q/K/V: [batch, len, d_model]; attn_mask: [batch, len_q, len_k].
        Returns (output [batch, len_q, d_model], attention weights).
        """
        residual, batch_size = input_Q, input_Q.size(0)
        # Project then split into heads: [batch, n_heads, len, d_k/d_v].
        Q = self.W_Q(input_Q).view(batch_size, -1, n_heads, d_k).transpose(1, 2)
        K = self.W_K(input_K).view(batch_size, -1, n_heads, d_k).transpose(1, 2)
        V = self.W_V(input_V).view(batch_size, -1, n_heads, d_v).transpose(1, 2)
        # Replicate the mask across heads.
        attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1)
        context, attn = ScaledDotProductAttention()(Q, K, V, attn_mask)
        # Merge the heads back: [batch, len_q, n_heads * d_v].
        context = context.transpose(1, 2).reshape(batch_size, -1, n_heads * d_v)
        output = self.fc(context)  # [batch_size, len_q, d_model]
        return self.layer_norm(output + residual), attn


class PoswiseFeedForwardNet(nn.Module):
    """Position-wise feed-forward block (d_model -> d_ff -> d_model) with a
    residual connection and LayerNorm.

    Fix: the original instantiated ``nn.LayerNorm(d_model).cuda()`` on every
    forward() call — untrainable parameters and a hard CUDA requirement. The
    norm is now a proper submodule that trains and follows the model device.
    """

    def __init__(self):
        super(PoswiseFeedForwardNet, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(d_model, d_ff, bias=False),
            nn.ReLU(),
            nn.Linear(d_ff, d_model, bias=False)
        )
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, inputs):
        '''
        inputs: [batch_size, seq_len, d_model]
        Returns a tensor of the same shape.
        '''
        residual = inputs
        output = self.fc(inputs)
        return self.layer_norm(output + residual)


class EncoderLayer(nn.Module):
    """One encoder block: self-attention followed by a position-wise FFN."""

    def __init__(self):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention()
        self.pos_ffn = PoswiseFeedForwardNet()

    def forward(self, enc_inputs, enc_self_attn_mask):
        """enc_inputs: [batch, src_len, d_model] -> (outputs, attention)."""
        attended, attn_weights = self.enc_self_attn(
            enc_inputs, enc_inputs, enc_inputs, enc_self_attn_mask
        )
        return self.pos_ffn(attended), attn_weights


class Encoder(nn.Module):
    """Stack of n_layers encoder blocks over token + positional embeddings.

    ``vocab_size`` is now a parameter (defaulting to the original hard-coded
    2550) so the model no longer silently depends on a magic vocabulary size.
    """

    def __init__(self, vocab_size=2550):
        super(Encoder, self).__init__()
        self.src_emb = nn.Embedding(vocab_size, d_model)
        self.pos_emb = PositionalEncoding(d_model)
        self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])

    def forward(self, enc_inputs):
        '''
        enc_inputs: [batch_size, src_len] token ids.
        Returns (enc_outputs [batch, src_len, d_model], list of per-layer
        attention tensors [batch, n_heads, src_len, src_len]).
        '''
        enc_outputs = self.src_emb(enc_inputs)  # [batch_size, src_len, d_model]
        # PositionalEncoding expects [seq_len, batch, d_model], hence the transposes.
        enc_outputs = self.pos_emb(enc_outputs.transpose(0, 1)).transpose(0, 1)
        enc_self_attn_mask = get_attn_pad_mask(enc_inputs, enc_inputs)  # [batch, src_len, src_len]
        enc_self_attns = []
        for layer in self.layers:
            enc_outputs, enc_self_attn = layer(enc_outputs, enc_self_attn_mask)
            enc_self_attns.append(enc_self_attn)
        return enc_outputs, enc_self_attns


# Pick the device first so the script also starts on CPU-only machines
# (the original called .cuda() unconditionally before checking availability).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Encoder().to(device)
criterion = nn.CrossEntropyLoss(ignore_index=0).to(device)  # ignore pad targets
optimizer = optim.SGD(model.parameters(), lr=1e-5, momentum=0.99)
if __name__ == '__main__':
    # Map each dialogue-act label string to a contiguous integer id.
    action_dict = {}
    # All action types expected in the label file.
    actions = [
        "Request-Symptom",
        "Inform-Symptom",
        "Request-Etiology",
        "Inform-Etiology",
        "Request-Basic_Information",
        "Inform-Basic_Information",
        "Request-Existing_Examination_and_Treatment",
        "Inform-Existing_Examination_and_Treatment",
        "Request-Drug_Recommendation",
        "Inform-Drug_Recommendation",
        "Request-Medical_Advice",
        "Inform-Medical_Advice",
        "Request-Precautions",
        "Inform-Precautions",
        "Diagnose",
        "Other"
    ]

    # Enumerate the list: each action name -> its index (starting at 0).
    for index, action in enumerate(actions):
        action_dict[action] = index
    label=[]
    # One label per line; presumably aligned with the sentences in train_Data
    # (order must match) — TODO confirm against the data files.
    # NOTE(review): an unknown label raises KeyError here — fail-fast behavior.
    with open("label.txt","r",encoding='utf-8') as file:
        f=file.readlines()
        for i in f:
            i=i.replace("\n","")
            label.append(action_dict[i])




    # Build the character vocabulary, tokenize the corpus, and pad every
    # sequence to the fixed length used by the model.
    dict(train_list)
    token = Token(train_list)
    token = padding(token)
    # Dataset wrapper pairing padded token-id rows with integer labels.
    class DATASET(Dataset):
        """Tensor-backed dataset of (token_ids, label) pairs."""

        def __init__(self, data, label):
            self.data = torch.tensor(data)
            self.label = torch.tensor(label)

        def __len__(self):
            return self.data.shape[0]

        def __getitem__(self, idx):
            return (self.data[idx], self.label[idx])


    dataset = DATASET(token, label)
    # drop_last: the trailing partial batch (22 samples) is discarded so every
    # batch has exactly batch_size rows.
    dataloader = DataLoader(dataset, batch_size=24, shuffle=True, drop_last=True)

    # NOTE(review): the encoder output [batch, 200, 768] is flattened straight
    # into CrossEntropyLoss, i.e. 153600 "classes" — there is no classification
    # head projecting to the 16 action labels. Training runs, but confirm this
    # is really the intended architecture.
    for epoch in range(1000):
        running_loss = 0.0
        for batch in tqdm.tqdm(dataloader):
            optimizer.zero_grad()  # clear gradients from the previous step
            src, tgt = batch
            src = src.to(device)   # was .cuda(); now follows the global device
            tgt = tgt.to(device)
            # Forward pass (model output is already on `device`; the original
            # redundant output.cuda() is dropped).
            output, enc_self_attns = model(src)
            # Flatten per-sample features; src.size(0) replaces hard-coded 24.
            loss = criterion(output.view(src.size(0), -1), tgt)
            loss.backward()        # backward pass
            optimizer.step()       # parameter update
            running_loss += loss.item()

        # NOTE(review): len(label)-19 as the divisor looks like a magic number
        # (approximate batch count?) — verify; len(dataloader) would be clearer.
        print(f'Epoch {epoch + 1}, Loss: {running_loss / (len(label)-19)}')
        if (epoch + 1) % 100 == 0:
            torch.save(model.state_dict(), '123.pth')

