## Example: automatic text generation (seq2seq fine-tuning script)
import io
import sys
sys.path.append("/NLP/bert_seq2seq-master/RoBERTa")
import torch
from tqdm import tqdm
import numpy as np
import json
from .config import sentiment_batch_size, sentiment_lr, roberta_chinese_model_path
from .model.test_seq2seq_model import Seq2SeqModel
from .model.roberta_model import BertConfig
import time
from torch.utils.data import Dataset, DataLoader
from .tokenizer import Tokenizer, load_chinese_base_vocab



def create_dataset():
    """Read the parallel source/target training files and return sentence pairs.

    Lines are paired positionally (``zip``); a pair is skipped when the
    combined character length of source and target exceeds 100, so every
    kept pair fits the model's sequence-length budget.

    Returns:
        tuple[list[str], list[str]]: (source sentences, target sentences).

    NOTE(review): only the first pair is returned (``[:1]``) — this looks
    like a deliberate overfit-one-sample debug setup; confirm before
    removing the slice.
    """
    input_result = []
    output_result = []
    # Context managers ensure both file handles are closed even on error
    # (the original left them open for the life of the process).
    with io.open("../unlim-seq2seq/Data/6000_data_result/question_train", encoding='UTF-8') as target, \
         io.open("../unlim-seq2seq/Data/6000_data_result/source_train", encoding='UTF-8') as source:
        for scr, tar in zip(source, target):
            scr = scr.replace("\n", "")
            tar = tar.replace("\n", "")
            if len(scr) + len(tar) > 100:
                continue
            input_result.append(scr)
            output_result.append(tar)
    print(len(input_result))
    return input_result[:1], output_result[:1]

## Custom Dataset implementation
class PoemDataset(Dataset):
    """Dataset that tokenizes (source, target) sentence pairs for seq2seq training."""

    def __init__(self):
        # Load every (source, target) pair and the vocabulary up front.
        super(PoemDataset, self).__init__()
        self.sents_src, self.sents_tgt = create_dataset()
        self.word2idx = load_chinese_base_vocab()
        # Reverse mapping: token id -> token string.
        self.idx2word = {idx: word for word, idx in self.word2idx.items()}
        self.tokenizer = Tokenizer(self.word2idx)

    def __getitem__(self, i):
        """Tokenize the i-th pair into ids plus segment ids."""
        src, tgt = self.sents_src[i], self.sents_tgt[i]
        token_ids, token_type_ids = self.tokenizer.encode(src, tgt)
        # Prefix of the segment ids up to the first segment-B (1) marker.
        # NOTE(review): despite the name, this is a list slice, not an int.
        source_prefix = token_type_ids[:token_type_ids.index(1)]
        return {
            "token_ids": token_ids,
            "token_type_ids": token_type_ids,
            "input_length": source_prefix,
        }

    def __len__(self):
        return len(self.sents_src)

def collate_fn(batch):
    """Dynamically right-pad a batch of samples to the longest sequence.

    Note: token_type_ids are padded with 1 rather than 0, because trailing
    positions belong to sentence B (segment id 1).
    """

    def _pad(sequences, width, fill=0):
        # Right-pad every sequence with `fill` up to `width` entries.
        rows = [seq + [fill] * max(0, width - len(seq)) for seq in sequences]
        return torch.tensor(rows)

    token_ids = [sample["token_ids"] for sample in batch]
    token_type_ids = [sample["token_type_ids"] for sample in batch]
    input_length = [sample["input_length"] for sample in batch]
    width = max(len(seq) for seq in token_ids)

    token_ids_padded = _pad(token_ids, width)
    token_type_ids_padded = _pad(token_type_ids, width, fill=1)
    # Targets are the input ids shifted left by one position.
    target_ids_padded = token_ids_padded[:, 1:].contiguous()

    return token_ids_padded, token_type_ids_padded, target_ids_padded, input_length


class PoemTrainer:
    """Fine-tunes a RoBERTa-based seq2seq model on the sentence-pair dataset.

    Loads pretrained BERT weights, builds the data loader, and runs one
    epoch at a time via ``train``; checkpoints rotate over 5 slots.
    """

    def __init__(self, random):
        # NOTE(review): `random` is stored but never read by any visible
        # method — confirm whether callers rely on it.
        self.random = random
        self.pretrain_model_path = roberta_chinese_model_path
        # A recent-checkpoint path can be used to resume training instead
        # of starting from the pretrained weights each run:
        # self.recent_model_path = "../poem_state_dict/bert_poem.model.epoch.9"
        self.batch_size = sentiment_batch_size
        self.lr = sentiment_lr
        # Vocabulary: token -> id.
        self.word2idx = load_chinese_base_vocab()
        # BUG FIX: the original condition chose "cpu" in both branches,
        # so a GPU was never used even when available.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print("device: " + str(self.device))
        # Model hyper-parameters derive from the vocabulary size.
        bertconfig = BertConfig(vocab_size=len(self.word2idx))
        self.bert_model = Seq2SeqModel(config=bertconfig)
        # Initialise from the pretrained BERT checkpoint.
        self.load_model(self.bert_model, self.pretrain_model_path)
        # self.load_recent_model(self.bert_model, self.recent_model_path)
        self.bert_model.to(self.device)
        # Parameters handed to the optimizer.
        self.optim_parameters = list(self.bert_model.parameters())
        self.init_optimizer(self.lr)
        dataset = PoemDataset()
        self.dataloader = DataLoader(dataset, batch_size=self.batch_size,
                                     shuffle=True, collate_fn=collate_fn)

    def init_optimizer(self, lr):
        """(Re)build the Adam optimizer with every parameter unfrozen."""
        for param in self.bert_model.parameters():
            param.requires_grad = True
        print("模型参数解冻")
        self.optimizer = torch.optim.Adam(self.optim_parameters, lr=lr,
                                          weight_decay=1e-3)

    def load_model(self, model, pretrain_model_path):
        """Load pretrained BERT weights into `model`.

        Keeps only encoder weights: strips the leading "bert." from key
        names and drops the pooler, which the seq2seq head does not use.
        ``strict=False`` tolerates the head parameters missing from the
        checkpoint.
        """
        checkpoint = torch.load(pretrain_model_path)
        checkpoint = {k[5:]: v for k, v in checkpoint.items()
                      if k[:4] == "bert" and "pooler" not in k}
        model.load_state_dict(checkpoint, strict=False)
        torch.cuda.empty_cache()
        print("{} loaded!".format(pretrain_model_path))

    def load_recent_model(self, model, recent_model_path):
        """Load a full checkpoint saved by ``save_state_dict`` to resume training."""
        checkpoint = torch.load(recent_model_path)
        model.load_state_dict(checkpoint)
        torch.cuda.empty_cache()
        # Message fix: match load_model's format (the original printed
        # "<path>loaded!" with no separator).
        print("{} loaded!".format(recent_model_path))

    def train(self, epoch):
        """Run a single training epoch."""
        self.bert_model.train()
        self.iteration(epoch, train=True)

    def freeze_parameters(self):
        """Freeze selected layers and rebuild the optimizer over the rest."""
        freeze_layers = ['layer.0', 'layer.1', 'layer.6', 'bert.pooler', 'out.']

        for name, param in self.bert_model.named_parameters():
            param.requires_grad = True
            for ele in freeze_layers:
                if ele in name:
                    param.requires_grad = False
                    break
        print("冻结模型参数")
        # BUG FIX: the original passed the bound method `parameters` itself
        # (missing call parentheses), so building the optimizer raised.
        self.optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, self.bert_model.parameters()),
            lr=self.lr, weight_decay=1e-3)

    def iteration(self, epoch, train=True):
        """Iterate once over the data loader, optionally updating weights."""
        total_loss = 0
        start_time = time.time()
        step = 0
        for token_ids, token_type_ids, target_ids, input_length in tqdm(
                self.dataloader, position=0, leave=True):
            step += 1
            # Periodically sample a generation to monitor training progress.
            if step % 200 == 0:
                self.bert_model.eval()
                test_data = ["采取减振工程措施时，不应削弱轨道结构的强度、稳定性及平顺性。"]
                for text in test_data:
                    print(self.bert_model.generate(text, beam_size=2, device=self.device))
                self.bert_model.train()

            token_ids = token_ids.to(self.device)
            token_type_ids = token_type_ids.to(self.device)
            target_ids = target_ids.to(self.device)

            # Passing `labels` makes the model compute and return the loss.
            predictions, loss = self.bert_model(token_ids,
                                                token_type_ids,
                                                labels=target_ids,
                                                device=self.device,
                                                input_length=input_length)
            if train:
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

            total_loss += loss.item()

        # BUG FIX: the original logged np.mean() of a scalar sum while the
        # comment promised the epoch mean; divide by the number of steps
        # (guarded against an empty loader).
        epoch_loss = total_loss / max(step, 1)
        with open("./losses", "a") as f:
            f.write("Epoch: {}, Loss_1t: {}\n".format(str(epoch), str(epoch_loss)))
        spend_time = time.time() - start_time
        print("epoch is " + str(epoch) + ". loss_1 is " + str(total_loss) + ". spend time is " + str(spend_time))
        # Sample a generation at the end of the epoch, then checkpoint.
        self.bert_model.eval()
        test_data = ["适用于地基差的场地，但耐腐蚀性差，需经常维护。"]
        for text in test_data:
            print(self.bert_model.generate(text, beam_size=3, device=self.device))
        self.bert_model.train()
        self.save_state_dict(self.bert_model, epoch)

    def save_state_dict(self, model, epoch, file_path="bert_poem.model"):
        """Save the model weights, rotating over 5 checkpoint slots (epoch % 5)."""
        epoch = epoch % 5
        save_path = "./train_model_dropout/" + file_path + ".epoch.{}".format(str(epoch))
        torch.save(model.state_dict(), save_path)
        print("{} saved!".format(save_path))


if __name__ == '__main__':

    trainer = PoemTrainer(True)
    train_epoches = 400
    # Train for a fixed number of epochs, releasing cached GPU memory
    # before each one.
    for current_epoch in range(1, train_epoches + 1):
        torch.cuda.empty_cache()
        trainer.train(current_epoch)
