import torch
import torch.nn as nn

from ai.config.config import GxlNode
from pre_data import PrepareData
import setting
from ai.gxl_model_warehouse.transformer import transformer
import my_runner


class GxlKLDivLoss(nn.Module):
    """KL-divergence loss with label smoothing and padding masking.

    ``x`` must already contain log-probabilities; the smoothed target
    distribution is built internally from the integer ``target`` ids:

    1. Label smoothing: the true class receives probability
       ``1 - smoothing`` and the remaining mass is spread uniformly over
       the other classes (the ``size - 2`` divisor excludes both the true
       class and the padding class from the smoothed mass).
    2. Padding masking: the padding class column is always set to 0, and
       any time step whose target IS padding gets an all-zero
       distribution, so that step contributes nothing to the loss —
       i.e. we do not care what the model predicts there.

    example:
    >>>vocab_size = 10
    >>>padding_idx = 0
    >>>inputs = torch.randn(3, 10, vocab_size)
    >>>target = torch.randint(0, 10, (3, 10))
    >>>loss = GxlKLDivLoss(vocab_size, padding_idx, 0.3)
    >>>print(loss(inputs, target))
    """

    def __init__(self, size, padding_idx, smoothing=0.0):
        """
        size: tgt vocabulary size (must equal x.size(-1) in forward)
        padding_idx: id of the padding token to mask out
        smoothing: total probability mass moved off the true class
        """
        super(GxlKLDivLoss, self).__init__()
        # 'batchmean' divides the summed KL by the batch size (first dim)
        self.criterion = nn.KLDivLoss(reduction='batchmean')
        self.padding_idx = padding_idx
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.size = size
        # last smoothed target distribution, kept for inspection/debugging
        self.true_dist = None

    def forward(self, x, target):
        """
        x: pre, log_probability, (batch_size, seq_lens, tgt_vocab_size)
        target: target, (batch_size, seq_lens)
        """
        assert x.size(-1) == self.size
        # build the smoothed target on a detached copy so no gradient
        # flows through the constructed distribution
        true_dist = x.detach().clone()
        true_dist.fill_(self.smoothing / (self.size - 2))
        true_dist.scatter_(2, target.unsqueeze(2), self.confidence)
        # the padding class is never a valid prediction target
        true_dist[:, :, self.padding_idx] = 0
        # zero out every time step whose target is padding; a single
        # broadcasted masked_fill_ replaces the old python loop over
        # torch.nonzero(torch.tensor(...)) (which also copied the mask
        # tensor and triggered a UserWarning)
        true_dist.masked_fill_((target == self.padding_idx).unsqueeze(2), 0.0)
        self.true_dist = true_dist
        res = self.criterion(x, true_dist)
        return res


class GxlWarmupOptimizer:
    """Adam wrapped with the "Noam" warmup learning-rate schedule.

    On every ``step()`` the learning rate is recomputed as

        lr = factor * model_dim**-0.5 * min(step**-0.5,
                                            step * warmup_steps**-1.5)

    clamped from below by ``min_lr``, pushed into every param group, and
    then the wrapped Adam update runs.
    """

    def __init__(self, model_params, model_dim, warmup_steps, factor=1.0, min_lr=1e-6):
        """
        model_params: iterable of parameters handed to Adam
        model_dim: model dimension used in the schedule's scale term
        warmup_steps: steps of linear warmup before the decay phase
        factor: global multiplier on the schedule
        min_lr: lower bound for the learning rate
        """
        # lr=0 is only a placeholder; step() overwrites it before every update
        self.optimizer = torch.optim.Adam(model_params, lr=0, betas=(0.9, 0.98), eps=1e-9)
        self.warmup_steps = warmup_steps
        self.factor = factor
        self.min_lr = min_lr
        self.model_dim = model_dim
        self._step = 0
        self._rate = 0

    def zero_grad(self):
        """Clear the gradients of all managed parameters."""
        self.optimizer.zero_grad()

    def step(self):
        """Advance one step: recompute lr, set it on all param groups, update."""
        self._step += 1
        rate = self.rate()
        for group in self.optimizer.param_groups:
            group['lr'] = rate
        self._rate = rate
        self.optimizer.step()

    def rate(self, step=None):
        """Return the learning rate for ``step`` (default: current step).

        Guards ``step <= 0``: the schedule is undefined there (the old
        code raised ZeroDivisionError via ``0 ** -0.5``), so report the
        ``min_lr`` floor instead.
        """
        if step is None:
            step = self._step
        if step <= 0:
            return self.min_lr
        rate = self.factor * (self.model_dim ** (-0.5) * min(step ** (-0.5), step * self.warmup_steps ** (-1.5)))
        return max(rate, self.min_lr)


def train():
    """Wire up data, loss, model, optimizer and runner, then start training."""
    prep = PrepareData('./data/train.txt', './data/dev.txt')
    # KL loss with no label smoothing; padding steps are masked out
    criterion = GxlKLDivLoss(prep.cn_total_words, setting.PAD, 0.00)
    model = transformer.Transformer(
        prep.en_total_words,
        1000,
        prep.cn_total_words,
        1000,
        num_layers=1
    )
    # plain Adam with a fixed lr (the warmup wrapper is not used here)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.98), eps=1e-9)
    runner = my_runner.GxlRunnerGxl(
        model=model,
        optim=optimizer,
        loss_f=criterion,
        train_loader=prep.train_data,
        config=GxlNode.get_config_from_yaml('./config.yaml'),
        valid_loader=prep.dev_data,
    )
    runner.run()


if __name__ == "__main__":
    # entry point: run the training loop (dead debug scratch code removed)
    train()
