import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.nn import Dropout
import copy
from torch import optim
from torch.utils.data import Dataset,DataLoader


# Select the compute device once at import time; every module below is
# constructed directly on this device.
Device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# NOTE(review): printing at import time is a module-level side effect.
print(Device)


class Embedding(nn.Embedding):
    """Token embedding table placed directly on the global ``Device``.

    Thin wrapper around :class:`nn.Embedding` that only pins the device.
    """

    def __init__(self, vocab_size, embedding_dim):
        super().__init__(vocab_size, embedding_dim, device=Device)


class PositionEmbedding(nn.Module):
    """Fixed sinusoidal positional encodings (Vaswani et al., 2017).

        pe[pos, 2i]   = sin(pos / 10000 ** (2i / d_model))
        pe[pos, 2i+1] = cos(pos / 10000 ** (2i / d_model))

    Bug fixes vs. the original: the base was 1000 instead of the canonical
    10000, and the exponent was doubled — ``arange(0, d_model, 2)`` already
    enumerates the even dimensions 2i, so multiplying by 2 again produced
    10000 ** (4i / d_model).

    Args:
        d_model: embedding dimension (assumed even, as in the original —
            odd sizes make the sin/cos interleave slices mismatch).
        max_len: maximum supported sequence length.
        device: device for the table; defaults to the module-level ``Device``.
    """

    def __init__(self, d_model, max_len=5000, device=None):
        super().__init__()
        dev = Device if device is None else device
        pe = torch.zeros(max_len, d_model, device=dev)
        position = torch.arange(0, max_len, dtype=torch.float, device=dev).unsqueeze(1)
        # div_term holds the even dims 2i = 0, 2, 4, ...; exponent is 2i / d_model.
        div_term = torch.arange(0, d_model, 2, device=dev).float()
        angle = position / 10000 ** (div_term / d_model)
        pe[:, 0::2] = torch.sin(angle)
        pe[:, 1::2] = torch.cos(angle)
        # Non-persistent buffer: moves with .to()/.cuda(), needs no gradient,
        # and is excluded from state_dict so existing checkpoints still load.
        self.register_buffer('pe', pe, persistent=False)

    def forward(self, x):
        """Return the (seq_len, d_model) encoding slice for token ids ``x``
        of shape (batch, seq_len); broadcasts over batch when added."""
        batch, seq_len = x.size()
        return self.pe[:seq_len, :]

class TransformEmbedding(nn.Module):
    """Token embedding plus fixed positional encoding, followed by dropout."""

    def __init__(self, vocab_size, embedding_dim, drop=0.1):
        super().__init__()
        self.embedding = Embedding(vocab_size, embedding_dim)
        self.position_embedding = PositionEmbedding(embedding_dim)
        self.dropout = Dropout(drop)

    def forward(self, x):
        # x: (batch_size, seq_len) token ids
        x = x.to(Device)
        token = self.embedding(x)
        # (seq_len, dim) positional slice broadcasts over the batch dim.
        pos = self.position_embedding(x)
        return self.dropout(token + pos)



class MultiAttention(nn.Module):
    """Multi-head scaled dot-product attention with an output projection.

    Args:
        d_model: model width; must be divisible by ``n_head``.
        n_head: number of attention heads.
        drop: dropout rate applied to the projected output.
        device: device for the projection layers; defaults to the
            module-level ``Device``.
    """

    def __init__(self, d_model, n_head, drop=0.1, device=None):
        super().__init__()
        # Without this check, an indivisible d_model silently corrupts the
        # head-split view() below.
        if d_model % n_head != 0:
            raise ValueError(f'd_model ({d_model}) must be divisible by n_head ({n_head})')
        dev = Device if device is None else device
        self.d_model = d_model
        self.n_head = n_head
        self.sub_model = d_model // n_head  # per-head width
        # Index 0: Q projection, 1: K projection, 2: V projection, 3: output projection.
        self.linear_list = nn.ModuleList([nn.Linear(d_model, d_model, device=dev) for _ in range(4)])
        self.drop = Dropout(drop)

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask=None):
        """Attend q over k/v.

        Args:
            q: (batch, len_q, d_model) queries.
            k, v: (batch, len_kv, d_model) keys and values (len_kv may
                differ from len_q for cross-attention).
            mask: boolean tensor broadcastable to
                (batch, n_head, len_q, len_kv); positions where the mask is
                False are blocked from attention.

        Returns:
            (batch, len_q, d_model) attention output after projection and dropout.
        """
        batch_size, length, _ = q.size()
        batch_kv, length_kv, _ = k.size()

        q = self.linear_list[0](q)
        k = self.linear_list[1](k)
        v = self.linear_list[2](v)

        # Split heads: (batch, len, d_model) -> (batch, n_head, len, sub_model).
        q = q.view(batch_size, length, self.n_head, self.sub_model).transpose(1, 2)
        k = k.view(batch_kv, length_kv, self.n_head, self.sub_model).transpose(1, 2)
        v = v.view(batch_kv, length_kv, self.n_head, self.sub_model).transpose(1, 2)

        # Scaled dot-product scores: (batch, n_head, len_q, len_kv).
        attn_score = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.sub_model)
        if mask is not None:
            # NOTE(review): -1e9 overflows fp16; fine for the fp32 used here.
            attn_score.masked_fill_(mask == False, -1e9)
        attn_score = F.softmax(attn_score, dim=-1)
        attn_output = torch.matmul(attn_score, v)

        # Merge heads back to (batch, len_q, d_model) and project.
        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, length, self.d_model)
        attn_output = self.linear_list[3](attn_output)
        return self.drop(attn_output)
    
    
    


class Laynormal(nn.Module):
    """Layer normalization over the last dimension (cf. ``nn.LayerNorm``).

    Bug fix vs. the original: the bias parameter was initialized to ones,
    shifting every output by +1 at initialization; standard LayerNorm
    initializes the bias to zeros.

    Args:
        d_model: size of the normalized (last) dimension.
        device: device for the affine parameters; defaults to the
            module-level ``Device``.
    """

    def __init__(self, d_model, device=None):
        super().__init__()
        dev = Device if device is None else device
        self.offset = 1e-12  # eps added to the variance for numerical stability
        self.gama = nn.Parameter(torch.ones(d_model, device=dev))
        self.bias = nn.Parameter(torch.zeros(d_model, device=dev))

    def forward(self, x):
        # Move input to wherever the parameters live (same as the original
        # global-Device behavior when constructed with the default device).
        x = x.to(self.gama.device)
        x_mean = torch.mean(x, dim=-1, keepdim=True)
        # Biased (population) variance, matching nn.LayerNorm.
        x_var = torch.var(x, dim=-1, unbiased=False, keepdim=True)
        x = (x - x_mean) / torch.sqrt(x_var + self.offset)
        return self.gama * x + self.bias


class FeedForward(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Linear -> Dropout.

    Args:
        d_model: input/output width.
        middle_dim: hidden width of the expansion layer.
        drop: dropout rate applied to the output.
        device: device for the linear layers; defaults to the module-level
            ``Device`` (generalized from the original hard-coded global).
    """

    def __init__(self, d_model, middle_dim, drop=0.1, device=None):
        super().__init__()
        dev = Device if device is None else device
        self.fc1 = nn.Linear(d_model, middle_dim, device=dev)
        self.fc2 = nn.Linear(middle_dim, d_model, device=dev)
        self.drop = Dropout(drop)

    def forward(self, x):
        # Follow the layers' device instead of the hard-coded global;
        # identical behavior under the default construction.
        x = x.to(self.fc1.weight.device)
        return self.drop(self.fc2(F.relu(self.fc1(x))))


class EncoderLayer(nn.Module):
    """One post-LN encoder block: self-attention then feed-forward, each
    sub-layer wrapped in dropout + residual + layer norm."""

    def __init__(self, d_model, middle_dim, n_head, drop=0.1):
        super().__init__()
        self.attention = MultiAttention(d_model, n_head, drop)
        self.laynormal_1 = Laynormal(d_model)
        self.feed_forward = FeedForward(d_model, middle_dim, drop)
        # Direct construction instead of deepcopy — identical deterministic
        # init. NOTE(review): the "laynorma_2" spelling is kept so state_dict
        # keys stay unchanged.
        self.laynorma_2 = Laynormal(d_model)
        self.dropout_1 = Dropout(drop)
        self.dropout_2 = Dropout(drop)

    def forward(self, x, mask):
        x = x.to(Device)
        attn_out = self.dropout_1(self.attention(x, x, x, mask))
        x = self.laynormal_1(x + attn_out)
        ff_out = self.dropout_2(self.feed_forward(x))
        return self.laynorma_2(x + ff_out)

class DecoderLayer(nn.Module):
    """One post-LN decoder block: masked self-attention, cross-attention over
    the encoder output, then feed-forward; each sub-layer wrapped in
    dropout + residual + layer norm.

    Bug fix vs. the original: the single ``self.attention`` module was
    reused for both the self-attention and the encoder-decoder
    cross-attention, so the two sub-layers shared one set of projection
    weights. A standard Transformer decoder keeps them separate.
    (This adds new parameters; old checkpoints lack the
    ``cross_attention.*`` keys.)
    """

    def __init__(self, d_model, middle_dim, n_head, drop=0.1):
        super().__init__()
        self.attention = MultiAttention(d_model, n_head, drop)        # masked self-attention
        self.cross_attention = MultiAttention(d_model, n_head, drop)  # attends encoder output
        self.feed_forward = FeedForward(d_model, middle_dim, drop)

        self.laynormal_1 = Laynormal(d_model)
        self.laynormal_2 = Laynormal(d_model)
        self.laynormal_3 = Laynormal(d_model)

        self.dropout_1 = Dropout(drop)
        self.dropout_2 = Dropout(drop)
        self.dropout_3 = Dropout(drop)

    def forward(self, dec, enc, mask_self, mask_cross):
        """dec: (batch, tgt_len, d_model) decoder states;
        enc: (batch, src_len, d_model) encoder output."""
        dec = dec.to(Device)
        enc = enc.to(Device)
        q = self.laynormal_1(dec + self.dropout_1(self.attention(dec, dec, dec, mask_self)))
        x = self.laynormal_2(q + self.dropout_2(self.cross_attention(q, enc, enc, mask_cross)))
        x = self.laynormal_3(x + self.dropout_3(self.feed_forward(x)))
        return x


class Encoder(nn.Module):
    """Embedding followed by a stack of ``n_layer`` encoder blocks."""

    def __init__(self, vocab_size, d_model, middle_dim, n_head, n_layer, drop=0.1):
        super().__init__()
        self.embedding = TransformEmbedding(vocab_size, d_model, drop)
        self.layers = nn.ModuleList(
            [EncoderLayer(d_model, middle_dim, n_head, drop) for _ in range(n_layer)]
        )

    def forward(self, x, mask):
        hidden = self.embedding(x.to(Device))
        for layer in self.layers:
            hidden = layer(hidden, mask)
        return hidden

        
class Decoder(nn.Module):
    """Embedding, a stack of decoder blocks, and a vocabulary projection head."""

    def __init__(self, vocab_size, d_model, middle_dim, n_head, n_layer, drop=0.1):
        super().__init__()
        self.embedding = TransformEmbedding(vocab_size, d_model, drop)
        self.layers = nn.ModuleList(
            [DecoderLayer(d_model, middle_dim, n_head, drop) for _ in range(n_layer)]
        )
        self.fc = nn.Linear(d_model, vocab_size, device=Device)

    def forward(self, dec, enc, mask_self, mask_cross):
        hidden = self.embedding(dec.to(Device))
        for layer in self.layers:
            hidden = layer(hidden, enc, mask_self, mask_cross)
        # (batch, tgt_len, d_model) @ (d_model, vocab) -> (batch, tgt_len, vocab)
        return self.fc(hidden)



class Transform(nn.Module):
    """Full encoder-decoder Transformer returning per-token vocabulary logits."""

    def __init__(self, vocab_size_src, vocab_size_dst, d_model, middle_dim, n_head, n_layer, drop=0.1):
        super().__init__()
        self.encoder = Encoder(vocab_size_src, d_model, middle_dim, n_head, n_layer, drop)
        self.decoder = Decoder(vocab_size_dst, d_model, middle_dim, n_head, n_layer, drop)

    def forward(self, src, dst, mask, mask_self, mask_cross):
        memory = self.encoder(src, mask)
        return self.decoder(dst, memory, mask_self, mask_cross)


class SimpleDataset(Dataset):
    """Toy copy task: each sample is 5 random tokens in [1, 9); the label is
    the same sequence wrapped with a 0 start marker and a 9 end marker.

    Args:
        num: number of samples to generate.
    """

    def __init__(self, num):
        prefix = torch.tensor([0])  # start-of-sequence marker
        suffix = torch.tensor([9])  # end-of-sequence marker
        self.data = [torch.randint(1, 9, size=(5,)) for _ in range(num)]
        # torch.cat copies its inputs, so the original's deepcopy of
        # self.data was redundant.
        self.labels = [torch.cat([prefix, seq, suffix], dim=0) for seq in self.data]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx], self.labels[idx]


def create_data(batch_size=5, num=100):
    """Build a shuffled DataLoader over ``num`` toy copy-task samples."""
    return DataLoader(SimpleDataset(num), batch_size=batch_size, shuffle=True)


def train():
    """Train the toy Transformer on the copy task, then greedily decode one
    fixed test sequence.

    Teacher forcing: the decoder sees ``label[:, :-1]`` (with the start
    token) and is trained to predict ``label[:, 1:]`` (with the end token).
    """
    vocab_size_enc = 10
    vocab_size_dec = 10

    dim = 512
    middle_dim = 4 * dim
    n_layer = 6
    n_head = 8
    intervals = 30

    model = Transform(vocab_size_enc, vocab_size_dec, dim, middle_dim, n_head, n_layer)
    model = model.to(Device)
    model.train()

    encode_opt = optim.Adam(model.parameters(), lr=1e-4)

    # Causal mask for the 6-token decoder input: True on/below the diagonal,
    # shaped (1, 1, 6, 6) so it broadcasts over batch and heads.
    mask = torch.triu(torch.ones(6, 6), diagonal=1) == 0
    mask_self = mask.view(1, 1, 6, 6).to(Device)

    criterion = nn.CrossEntropyLoss()
    datas = create_data(32, 1000)

    for interval in range(intervals):
        loss_all = 0
        for data, label in datas:
            data = data.to(Device)
            label = label.to(Device)
            input_label = label[:, :-1]   # decoder input (starts with 0)
            output_label = label[:, 1:]   # prediction target (ends with 9)

            output = model(data, input_label, mask=None, mask_self=mask_self, mask_cross=None)
            loss: torch.Tensor = criterion(
                output.reshape(-1, output.size(-1)), output_label.reshape(-1)
            )
            loss_all += loss.item()

            encode_opt.zero_grad()
            loss.backward()
            encode_opt.step()
        # (The original used for/else here; without a break the else always
        # ran, so this is the same behavior written plainly.)
        print(f'{interval} 轮 loss_all', loss_all / len(datas))

    _greedy_decode(model)


def _greedy_decode(model, max_steps=10):
    """Greedily decode up to ``max_steps`` tokens for a fixed test input,
    printing the growing label sequence; stops at the end token 9."""
    model.eval()
    with torch.no_grad():
        test_in_data = torch.tensor([1, 2, 3, 4, 5]).long().view(1, -1).to(Device)
        label = torch.tensor([0]).long().view(1, -1).to(Device)

        for _ in range(max_steps):
            # Bug fix: the original built the causal mask from len(label),
            # which is the batch dimension (always 1), not the sequence
            # length — so greedy decoding ran with no causal mask at all.
            seq_len = label.size(1)
            mask = torch.triu(torch.ones(seq_len, seq_len), diagonal=1) == 0
            mask_self = mask.view(1, 1, seq_len, seq_len).to(Device)

            output = model(test_in_data, label, None, mask_self, None)
            next_token = F.log_softmax(output, dim=-1).argmax(dim=-1)[:, -1]
            label = torch.cat([label, next_token.view(1, -1)], dim=1)
            print(label)
            if next_token.item() == 9:  # 9 is the end-of-sequence marker
                break


# Entry point: run the full training + greedy-decoding demo.
if __name__ == '__main__':
    train()


    


