#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author: Shi sha
Date: 2020-12-31
Description: 训练一个基于bert的seq2seq模型，参考作业3的solution
'''

import sys
sys.path.append('..')
import torch
from tqdm import tqdm
import os
from configs.settings import (
    BATCH_SIZE, 
    LR,
    MAX_LENGTH, 
    MAX_GRAD_NORM,
    SAVED_BERT_PATH, 
    SAVED_GENERATIVE_PATH
)
from generative.seq2seq import Seq2SeqModel
from generative.bert_model import BertConfig
import time
from torch.utils.data import Dataset, DataLoader
from generative.tokenizer import Tokenizer, load_chinese_base_vocab
from site_packages.utils.job import DataOp
import logging
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


GRADIENT_ACCUMULATION = 32


def load_dataset(dataset):
    """Load the named dataset and return parallel (question, answer) lists."""
    frame = DataOp.load_data(dataset)
    return frame['question'].tolist(), frame['answer'].tolist()


# Custom Dataset wrapper
class CustomizedDataset(Dataset):
    """
    Dataset class producing tokenized (source, target) samples for the
    train / validation / test splits.
    """

    def __init__(self, dataset):
        super(CustomizedDataset, self).__init__()
        # Load the raw parallel corpus for this split.
        self.sents_src, self.sents_tgt = load_dataset(dataset)
        self.word2idx = load_chinese_base_vocab()
        self.idx2word = {idx: word for word, idx in self.word2idx.items()}
        self.tokenizer = Tokenizer(self.word2idx)

    def __getitem__(self, i):
        # Fetch one sample; truncate both sides to at most MAX_LENGTH chars
        # (slicing is a no-op for shorter sequences).
        src = self.sents_src[i][:MAX_LENGTH]
        tgt = self.sents_tgt[i][:MAX_LENGTH]

        token_ids, token_type_ids = self.tokenizer.encode(src, tgt)
        return {
            "token_ids": token_ids,
            "token_type_ids": token_type_ids,
        }

    def __len__(self):
        return len(self.sents_src)


def collate_fn(batch):
    """
    Dynamically pad a list of samples to the longest sequence in the batch.

    Returns (token_ids, token_type_ids, target_ids) tensors; target_ids is
    token_ids shifted left by one position (next-token labels). Both id
    tensors are right-padded with 0, which keeps pad positions out of the
    sentence-B segment.
    """
    def pad_to(sequences, length, pad_idx=0):
        # Right-pad each sequence with pad_idx up to `length`.
        rows = []
        for seq in sequences:
            rows.append(seq + [pad_idx] * max(0, length - len(seq)))
        return torch.tensor(rows)

    token_ids = [sample["token_ids"] for sample in batch]
    token_type_ids = [sample["token_type_ids"] for sample in batch]
    longest = max(len(seq) for seq in token_ids)

    token_ids_padded = pad_to(token_ids, longest)
    token_type_ids_padded = pad_to(token_type_ids, longest)
    # Labels: drop the first position so position t predicts token t+1.
    target_ids_padded = token_ids_padded[:, 1:].contiguous()

    return token_ids_padded, token_type_ids_padded, target_ids_padded


class Trainer:

    """
    Model trainer driving the whole training process of the BERT seq2seq model.

    args:
        checkpoint: int, epoch number of a saved checkpoint to resume from.
    attr:
        model_path: path weights are loaded from — the pretrained BERT path
            when checkpoint is None, otherwise the fine-tuned save path.
        word2idx: vocabulary of the pretrained model.
        device: "cuda" when available, otherwise "cpu".
        bert_model: the BERT-based Seq2SeqModel with parameters initialized.
        optim_parameters: parameters handed to the optimizer.
        optimizer: Adam optimizer with weight decay.
    """

    def __init__(self, checkpoint=None):
        self.model_path = SAVED_BERT_PATH if checkpoint is None else SAVED_GENERATIVE_PATH
        self.batch_size = BATCH_SIZE
        self.word2idx = load_chinese_base_vocab()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        bertconfig = BertConfig()
        # Resize the configured vocab to match the loaded vocabulary.
        if hasattr(bertconfig, 'vocab_size'):
            setattr(bertconfig, 'vocab_size', len(self.word2idx))
        self.bert_model = Seq2SeqModel(config=bertconfig)
        self.load_model(self.bert_model, checkpoint)
        self.bert_model.to(self.device)
        self.optim_parameters = list(self.bert_model.parameters())
        self.optimizer = torch.optim.Adam(self.optim_parameters, lr=LR, weight_decay=1e-3)
        self.trainloader, self.devloader = self.get_dataloader(BATCH_SIZE)

    def get_dataloader(self, batch_size):
        """Build the training and validation data loaders."""
        train = CustomizedDataset('bert_train')
        dev = CustomizedDataset('bert_valid')
        trainloader = DataLoader(train, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
        devloader = DataLoader(dev, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
        return trainloader, devloader

    def load_model(self, model, checkpoint):
        """Load model weights.

        With no checkpoint, load the pretrained BERT weights, keeping only
        the "bert.*" entries (pooler excluded) with the prefix stripped;
        otherwise load the saved epoch checkpoint as-is.
        """
        if checkpoint is None:
            model_path = os.path.join(self.model_path, 'pytorch_model.bin')
            checkpoint = torch.load(model_path)
            # Strip the "bert." prefix; drop pooler weights (unused here).
            checkpoint = {k[5:]: v for k, v in checkpoint.items()
                          if k[:4] == "bert" and "pooler" not in k}
        else:
            model_path = os.path.join(self.model_path, "bert.model.epoch.{}".format(checkpoint))
            checkpoint = torch.load(model_path)

        model.load_state_dict(checkpoint, strict=False)
        torch.cuda.empty_cache()
        logger.info("{} loaded!".format(model_path))

    def train(self, epoch):
        """Run one training epoch."""
        logger.info('starting training')
        self.bert_model.train()
        self.iteration(epoch, dataloader=self.trainloader)
        logger.info('training finished')

    def iteration(self, epoch, dataloader):
        """One epoch of training with gradient accumulation."""
        total_loss = 0.0
        num_batches = 0
        start_time = time.time()  # epoch start timestamp
        # Start from clean gradients so accumulation windows are well-defined.
        self.optimizer.zero_grad()
        for batch_idx, data in enumerate(tqdm(dataloader, position=0, leave=True)):
            token_ids, token_type_ids, target_ids = data
            token_ids = token_ids.to(self.device)
            token_type_ids = token_type_ids.to(self.device)
            target_ids = target_ids.to(self.device)
            # Passing labels makes the model compute and return the loss.
            enc_layers, logits, loss, attention_layers = self.bert_model(token_ids,
                                                token_type_ids,
                                                labels=target_ids
                                                )
            # Track the unscaled loss of EVERY batch for the epoch average
            # (previously only boundary batches were counted, and never logged).
            total_loss += loss.item()
            num_batches += 1
            # Scale so the accumulated gradient matches one large-batch step.
            loss = loss / GRADIENT_ACCUMULATION
            loss.backward()
            if (batch_idx + 1) % GRADIENT_ACCUMULATION == 0:
                # BUGFIX: clip BEFORE the optimizer step; clipping after
                # step/zero_grad (as before) never affected any update.
                torch.nn.utils.clip_grad_norm_(self.bert_model.parameters(), MAX_GRAD_NORM)
                self.optimizer.step()
                self.optimizer.zero_grad()
        # Flush leftover gradients when the batch count is not a multiple
        # of GRADIENT_ACCUMULATION (previously silently dropped).
        if num_batches % GRADIENT_ACCUMULATION != 0:
            torch.nn.utils.clip_grad_norm_(self.bert_model.parameters(), MAX_GRAD_NORM)
            self.optimizer.step()
            self.optimizer.zero_grad()
        spend_time = time.time() - start_time
        avg_loss = total_loss / max(num_batches, 1)
        # Report the epoch-average loss rather than the last batch's scaled loss.
        logger.info(f"epoch is {epoch}. loss is {avg_loss:.6f}. spend time is {spend_time}")
        # Checkpoint the model for this epoch.
        self.save_state_dict(self.bert_model, epoch)

    def evaluate(self):
        """Evaluate the model on the validation set."""
        logger.info("start evaluating model")
        self.bert_model.eval()
        logger.info('starting evaluating')
        with torch.no_grad():
            # enumerate from 1 so the first batch is logged as batch 1
            # (the old counter logged it as batch 2).
            for batch_num, (token_ids, token_type_ids, target_ids) in enumerate(
                    tqdm(self.devloader, position=0, leave=True), start=1):
                token_ids = token_ids.to(self.device)
                token_type_ids = token_type_ids.to(self.device)
                target_ids = target_ids.to(self.device)

                enc_layers, logits, loss, _ = self.bert_model(token_ids,
                                                token_type_ids,
                                                labels=target_ids
                                                )

                loss = loss.mean()
                logger.info("evaluate batch {} ,loss {}".format(batch_num, loss))
        logger.info("finishing evaluating")

    def save_state_dict(self, model, epoch):
        """Save the current model parameters for this epoch."""
        save_path = os.path.join(SAVED_GENERATIVE_PATH, "bert.model.epoch.{}".format(str(epoch)))
        torch.save(model.state_dict(), save_path)
        logger.info("{} saved!".format(save_path))


if __name__ == "__main__":
    trainer = Trainer()
    train_epoches = 10

    # Train epoch by epoch; each epoch saves its own checkpoint.
    for epoch in range(train_epoches):
        trainer.train(epoch)
    # Evaluation (disabled by default):
    # trainer.evaluate()