import torch
import torch.nn as nn
from torch.nn import Dropout
import torch.nn.functional as F
import math
from a_1_embedding import Embedding
import copy
import time
from torch.utils.data import Dataset, DataLoader
from rich import print
import copy
from torch import optim

# Select the compute device: prefer CUDA when available, else CPU.
if torch.cuda.is_available():
    Device = torch.device('cuda')
else:
    Device = torch.device('cpu')
print(Device)


# Debug aid: holds the most recently seen training batch (set inside train()).
DATA_EXISTS = []

class SimpleDataset(Dataset):
    """Toy copy-task dataset.

    Each sample is a random sequence of 5 tokens drawn from 1..8.  Its label
    is the same sequence framed by a start token (0) in front and an end
    token (9) behind, i.e. label = [0, *sample, 9] with length 7.
    """

    def __init__(self, num):
        # num: number of (data, label) pairs to generate.
        prefix = torch.tensor([0])  # start-of-sequence token prepended to labels
        suffix = torch.tensor([9])  # end-of-sequence token appended to labels

        self.data = [torch.randint(1, 9, size=(5,)) for _ in range(num)]
        # torch.cat allocates new tensors, so the original code's deepcopy of
        # self.data was unnecessary — the labels never alias the data tensors.
        self.labels = [torch.cat([seq, suffix], dim=0) if False else
                       torch.cat([prefix, seq, suffix], dim=0)
                       for seq in self.data] if False else \
                      [torch.cat([prefix, seq, suffix], dim=0) for seq in self.data]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Returns (input sequence of length 5, framed label of length 7).
        return self.data[idx], self.labels[idx]


def create_data(batch_size=5, num=100):
    """Build a shuffled DataLoader over a fresh SimpleDataset of `num` samples."""
    return DataLoader(SimpleDataset(num), batch_size=batch_size, shuffle=True)


# 开始训练
def train():
    """Train the toy encoder-decoder on the copy task.

    Task: reproduce a 5-token input (tokens 1..8), framed on the decoder side
    by start token 0 and end token 9 (decoder input/output length is 6).
    After every epoch the full models are checkpointed and a single greedy
    decoding sanity check is printed.
    """
    # Local import keeps the project dependency out of module import time.
    from a_6_encoder_lay import Encoder
    from a_6_encoder_lay import Decoder
    import os

    vocab_size_enc = 10
    vocab_size_dec = 10

    dim = 512
    middle_dim = 4 * 512  # feed-forward hidden size (4x model dim)
    n_layer = 6
    n_head = 8
    intervals = 30        # number of epochs

    # NOTE(review): the enc/dec vocab sizes are swapped relative to their
    # names; harmless while both are 10, but worth confirming.
    encoder = Encoder(vocab_size_dec, dim, middle_dim, n_head, n_layer)
    decoder = Decoder(vocab_size_enc, dim, middle_dim, n_head, n_layer)

    # BUG FIX: the models were never moved to `Device`, while the batches
    # below are — on a CUDA machine that is a device-mismatch crash.
    encoder.to(Device)
    decoder.to(Device)

    encoder.train()
    decoder.train()

    encode_opt = optim.SGD(encoder.parameters(), lr=1e-1)
    decode_opt = optim.SGD(decoder.parameters(), lr=1e-1)

    mask_enc = None

    # Causal mask for decoder self-attention: True where attention is allowed
    # (query position >= key position), False on future positions.
    mask = torch.triu(torch.ones(6, 6), diagonal=1) == 0
    mask_self = mask.view(1, 1, 6, 6).to(Device)
    mask_cross = None

    datas = create_data(32, 500)
    criterion = nn.NLLLoss()

    # BUG FIX: ensure the checkpoint directory exists before the first save.
    os.makedirs('./models/full_model', exist_ok=True)

    for interval in range(intervals):
        loss_all = 0

        for data, label in datas:
            label = label.to(Device)
            data = data.to(Device)

            # Debug aid: expose the most recent batch globally.
            global DATA_EXISTS
            DATA_EXISTS = data

            # Teacher forcing: feed label[:, :-1], predict label[:, 1:].
            input_label = label[:, :-1]
            output_label = label[:, 1:]

            enc = encoder(data, mask_enc)
            output = decoder(input_label, enc, mask_self, mask_cross)
            # (batch, seq_len, vocab) log-probabilities for NLLLoss.
            log_softmax = F.log_softmax(output, dim=-1)
            # Vocab size taken from the output instead of hard-coded 10.
            loss: torch.Tensor = criterion(
                log_softmax.reshape(-1, log_softmax.size(-1)),
                output_label.reshape(-1),
            )
            loss_all += loss.item()

            encode_opt.zero_grad()
            decode_opt.zero_grad()
            loss.backward()
            encode_opt.step()
            decode_opt.step()

        print(f'{interval} 轮 loss_all', loss_all / len(datas))

        # Checkpoint the full models (architecture + weights) each epoch.
        torch.save(encoder, './models/full_model/encoder_all_model.pth')
        torch.save(decoder, './models/full_model/decoder_all_model.pth')

        # Greedy-decoding sanity check on one fixed input.
        encoder.eval()
        decoder.eval()
        with torch.no_grad():
            test_in_datas = [1, 2, 3, 4, 5]
            print(test_in_datas)
            test_in_data = torch.Tensor(test_in_datas).long().view(1, -1).to(Device)

            # Decode starting from the start token 0.
            label = torch.Tensor([0]).long().view(1, -1).to(Device)

            max_len = 20  # safety cap: an untrained model may never emit 9
            while label.size(1) < max_len:
                # BUG FIX: the causal mask must be sized by the sequence
                # length (label.size(1)); the original used len(label),
                # which is the batch size (always 1 here).
                seq_len = label.size(1)
                # BUG FIX: use a separate variable — the original reassigned
                # `mask_self`, destroying the training mask from epoch 2 on.
                eval_mask = torch.triu(torch.ones(seq_len, seq_len), diagonal=1) == 0
                eval_mask = eval_mask.view(1, 1, seq_len, seq_len).to(Device)

                enc = encoder(test_in_data, None)
                output = decoder(label, enc, eval_mask, None)
                log_softmax = F.log_softmax(output, dim=-1)
                next_tok = log_softmax.argmax(dim=-1)[:, -1]
                label = torch.cat([label, next_tok.view(1, -1)], dim=1)
                print(label)
                if next_tok.item() == 9:  # 9 is the end-of-sequence token
                    break
        encoder.train()
        decoder.train()

def predict():
    """Load the checkpointed full models and greedily decode a fixed input.

    Decoding stops when the end token (9) is produced or a safety cap is hit.
    """
    # NOTE(review): torch.load of a full model requires the Encoder/Decoder
    # class definitions to be importable.  map_location keeps loading working
    # when the checkpoint was saved on a different device (e.g. GPU -> CPU).
    encoder = torch.load('./models/full_model/encoder_all_model.pth', map_location=Device)
    decoder = torch.load('./models/full_model/decoder_all_model.pth', map_location=Device)

    encoder.to(Device)
    decoder.to(Device)

    encoder.eval()
    decoder.eval()

    with torch.no_grad():
        # Fixed test input sequence.
        test_in_datas = [1, 2, 3, 4, 5]
        test_in_data = torch.Tensor(test_in_datas).long().view(1, -1).to(Device)

        # NOTE(review): decoding starts from the full prompt [0..5] here,
        # while train() starts from just the start token 0 — confirm intended.
        label = torch.Tensor([0, 1, 2, 3, 4, 5]).long().view(1, -1).to(Device)

        max_len = 20  # safety cap: a bad model may never emit the end token
        while label.size(1) < max_len:
            # BUG FIX: size the causal mask by the sequence length
            # (label.size(1)); the original used len(label), which is the
            # batch size (always 1 here), yielding a useless 1x1 mask.
            seq_len = label.size(1)
            mask = torch.triu(torch.ones(seq_len, seq_len), diagonal=1) == 0
            mask_self = mask.view(1, 1, seq_len, seq_len).to(Device)

            enc = encoder(test_in_data, None)
            output = decoder(label, enc, mask_self, None)
            log_softmax = F.log_softmax(output, dim=-1)
            next_tok = log_softmax.argmax(dim=-1)[:, -1]
            label = torch.cat([label, next_tok.view(1, -1)], dim=1)
            print(label)
            if next_tok.item() == 9:  # 9 is the end-of-sequence token
                break


if __name__ == '__main__':
    # Entry point: run training (which also checkpoints and sanity-decodes).
    # Swap the comments to run inference on saved checkpoints instead.
    train()
    # predict()
