# Code taken here https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/kfac.py (with minor modifications)
import math

import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.optimizer import StateDict


# TODO: In order to make this code faster:
# 1) Implement _extract_patches as a single cuda kernel
# 2) Compute QR decomposition in a separate process
# 3) Actually make a general KFAC optimizer so it fits PyTorch


class AddBias(nn.Module):
    """Bias-only layer: adds a learnable bias vector to its input.

    Used by SplitBias-style wrappers that strip the bias off a layer so
    that K-FAC can treat the weight and the bias as separate factored
    blocks.
    """

    def __init__(self, bias):
        super(AddBias, self).__init__()
        # Stored as a column vector (n, 1); reshaped per input rank in forward.
        self._bias = nn.Parameter(bias.unsqueeze(1))

    def forward(self, x):
        # Broadcast across the batch (2-D input) or batch + spatial dims
        # (4-D conv feature maps).
        target_shape = (1, -1) if x.dim() == 2 else (1, -1, 1, 1)
        return x + self._bias.view(*target_shape)


def _extract_patches(x, kernel_size, stride, padding):
    '''
    todo 了解整个过程

    x: 输入的Tensor
    kernel_size: 卷积核大小
    stride: 步长
    padding: 填充
    '''

    if padding[0] + padding[1] > 0:
        # 填充输入的tensor
        x = F.pad(x, (padding[1], padding[1], padding[0],
                      padding[0])).data  # Actually check dims
    # x.unfold(2, kernel_size[0], stride[0]) 在维度2（即高度方向）上应用滑动窗口，窗口大小为 kernel_size[0]，步长为 stride[0]。
    x = x.unfold(2, kernel_size[0], stride[0])
    # x.unfold(3, kernel_size[1], stride[1]) 在维度3（即宽度方向）上应用滑动窗口，窗口大小为 kernel_size[1]，步长为 stride[1]。
    x = x.unfold(3, kernel_size[1], stride[1])
    # 调整了维度的顺序，将窗口的位置信息移到前面。
    x = x.transpose_(1, 2).transpose_(2, 3).contiguous()
    # 数据重新组织成新的形状，其中前三个维度保持不变，最后一个维度被拉平，包含了所有滑动窗口的元素
    x = x.view(
        x.size(0), x.size(1), x.size(2), x.size(3) * x.size(4) * x.size(5))
    return x


def compute_cov_a(a, classname, layer_info, fast_cnn):
    """Estimate the Kronecker factor A = E[a a^T] of the layer inputs.

    a: activations entering the layer; first dim is the batch
    classname: layer class name ('Linear', 'Conv2d' or 'AddBias')
    layer_info: (kernel_size, stride, padding) for Conv2d, else unused
    fast_cnn: if True, average conv patches per sample instead of
        treating every spatial location as a sample

    Returns a square covariance-factor matrix.
    """
    batch_size = a.size(0)

    if classname == 'Conv2d':
        patches = _extract_patches(a, *layer_info)
        if fast_cnn:
            # One averaged patch vector per sample.
            a = patches.view(patches.size(0), -1, patches.size(-1)).mean(1)
        else:
            # Every spatial location counts as a sample; divide by the
            # two spatial extents so the expectation stays comparable.
            a = patches.view(-1, patches.size(-1)) / patches.size(1)
            a = a / patches.size(2)
    elif classname == 'AddBias':
        # The input to a pure-bias layer is the constant 1 per sample.
        ones = torch.ones(a.size(0), 1)
        a = ones.cuda() if a.is_cuda else ones

    return a.t() @ (a / batch_size)


def compute_cov_g(g, classname, layer_info, fast_cnn):
    """Estimate the Kronecker factor G = E[g g^T] of the output gradients.

    g: gradient w.r.t. the layer output; first dim is the batch
    classname: layer class name ('Linear', 'Conv2d' or 'AddBias')
    layer_info: (kernel_size, stride, padding) for Conv2d, else unused
    fast_cnn: if True, sum conv gradients over spatial locations per sample

    Returns a square covariance-factor matrix.
    """
    batch_size = g.size(0)

    if classname == 'Conv2d':
        if fast_cnn:
            # Collapse spatial dims: one gradient vector per sample.
            g = g.view(g.size(0), g.size(1), -1).sum(-1)
        else:
            # Treat every spatial location as a sample, rescaled by the
            # spatial extents (mirrors the 1/HW scaling in compute_cov_a).
            g = g.transpose(1, 2).transpose(2, 3).contiguous()
            # After the moves, dims 1 and 2 are the spatial extents H, W.
            g = g.view(-1, g.size(-1)) * g.size(1) * g.size(2)
    elif classname == 'AddBias':
        g = g.view(g.size(0), g.size(1), -1).sum(-1)

    # Note: g.size(0) here is the (possibly reshaped) first dim, not
    # necessarily batch_size — kept exactly as in the original scaling.
    scaled = g * batch_size
    return scaled.t() @ (scaled / g.size(0))


def update_running_stat(aa, m_aa, momentum):
    """Exponential moving average: m_aa <- momentum*m_aa + (1-momentum)*aa.

    Performed entirely in place on m_aa (the caller's tensor is mutated;
    aa is left untouched) so no temporary tensors are allocated.
    """
    # Factor the EMA as ((m * p/(1-p)) + aa) * (1-p) to avoid scaling aa.
    m_aa.mul_(momentum / (1 - momentum))
    m_aa.add_(aa)
    m_aa.mul_(1 - momentum)


class SplitBias(nn.Module):
    """Wrap a layer so its bias is applied by a separate AddBias module.

    K-FAC factors weight and bias as distinct blocks, so the wrapped
    module's own bias is detached (set to None) and re-applied by an
    AddBias layer after the wrapped module runs.
    """

    def __init__(self, module):
        """
        module: a layer that currently owns a bias parameter
        """
        super(SplitBias, self).__init__()
        self.module = module
        self.add_bias = AddBias(module.bias.data)
        # The wrapped layer must no longer apply its own bias.
        self.module.bias = None

    def forward(self, input):
        # Linear/conv part first, then the split-off bias.
        return self.add_bias(self.module(input))


class KFACOptimizer(optim.Optimizer):
    """Kronecker-Factored Approximate Curvature (K-FAC) optimizer.

    ACKTR uses (approximate) second-order updates; computing the full
    Fisher information matrix is intractable, so K-FAC approximates each
    layer's curvature as a Kronecker product of two small factors: the
    input covariance A and the output-gradient covariance G.
    """

    def __init__(self,
                 model,
                 lr=0.25,
                 momentum=0.9,
                 stat_decay=0.99,
                 kl_clip=0.001,
                 damping=1e-2,
                 weight_decay=0,
                 fast_cnn=False,
                 Ts=1,
                 Tf=10):
        """
        model: network to optimize (modified in place: every layer with
            a bias is rewrapped as SplitBias)
        lr: learning rate
        momentum: SGD momentum applied to the preconditioned gradient
        stat_decay: EMA decay for the running covariance factors
        kl_clip: trust-region bound used to rescale the step in step()
        damping: Tikhonov damping added to the factored curvature
        weight_decay: L2 penalty added to the raw gradients
        fast_cnn: use the cheaper averaged-patch conv covariance estimate
        Ts: period (in steps) between covariance-statistics updates
        Tf: period (in steps) between eigendecomposition refreshes
        """
        # No per-group hyperparameters are used; everything lives on self,
        # so the Optimizer base class gets an empty defaults dict.
        defaults = dict()

        def split_bias(module):
            '''Recursively replace each child that owns a bias parameter
            with a SplitBias wrapper, so K-FAC can factor weight and bias
            separately.

            module: (sub)module whose direct children are inspected
            '''
            # named_children only lists direct children; recursion into
            # deeper levels is done explicitly below.
            for mname, child in module.named_children():
                # Only wrap children whose bias actually exists: a child
                # with bias=None (e.g. Linear(..., bias=False)) would make
                # AddBias crash on module.bias.data.
                if getattr(child, 'bias', None) is not None:
                    module._modules[mname] = SplitBias(child)
                else:
                    split_bias(child)

        # Move every bias into its own AddBias layer.
        split_bias(model)

        super(KFACOptimizer, self).__init__(model.parameters(), defaults)

        # Layer types whose curvature we know how to factor.
        self.known_modules = {'Linear', 'Conv2d', 'AddBias'}

        # Tracked layers (filled in by _prepare_model).
        self.modules = []
        self.grad_outputs = {}

        # The model being optimized.
        self.model = model
        self._prepare_model()

        # Number of completed step() calls.
        self.steps = 0

        # Running covariance factors keyed by module: A-factor (m_aa) and
        # G-factor (m_gg), plus their eigenvectors (Q_*) and eigenvalues (d_*).
        self.m_aa, self.m_gg = {}, {}
        self.Q_a, self.Q_g = {}, {}
        self.d_a, self.d_g = {}, {}

        self.momentum = momentum
        self.stat_decay = stat_decay

        self.lr = lr
        self.kl_clip = kl_clip
        self.damping = damping
        self.weight_decay = weight_decay

        self.fast_cnn = fast_cnn

        self.Ts = Ts
        self.Tf = Tf

        # The actual parameter update is delegated to SGD; step() only
        # rewrites p.grad with the preconditioned, rescaled direction.
        # lr is scaled by (1 - momentum) so the steady-state step size
        # under momentum matches lr.
        self.optim = optim.SGD(
            model.parameters(),
            lr=self.lr * (1 - self.momentum),
            momentum=self.momentum)

        # When True, the backward hooks accumulate G-factor statistics.
        self.acc_stats = True

    def load_state_dict(self, state_dict: StateDict) -> None:
        """Restore state produced by state_dict(), including the K-FAC
        statistics ('okfac_' keys) and the inner SGD optimizer's state."""
        super().load_state_dict(state_dict)
        self.steps = state_dict['okfac_steps']
        self.m_aa = state_dict['okfac_m_aa']
        self.m_gg = state_dict['okfac_m_gg']
        self.Q_a = state_dict['okfac_Q_a']
        self.Q_g = state_dict['okfac_Q_g']
        self.d_a = state_dict['okfac_d_a']
        self.d_g = state_dict['okfac_d_g']
        self.momentum = state_dict['okfac_momentum']
        self.stat_decay = state_dict['okfac_stat_decay']
        self.lr = state_dict['okfac_lr']
        self.kl_clip = state_dict['okfac_kl_clip']
        self.damping = state_dict['okfac_damping']
        self.weight_decay = state_dict['okfac_weight_decay']
        self.fast_cnn = state_dict['okfac_fast_cnn']
        self.Ts = state_dict['okfac_Ts']
        self.Tf = state_dict['okfac_Tf']
        self.optim.load_state_dict(state_dict['okfac_optim'])


    def state_dict(self) -> StateDict:
        """Return the base optimizer state augmented with the K-FAC
        statistics (prefixed 'okfac_') and the inner SGD state."""
        state_dict = super().state_dict()
        state_dict.update({
            'okfac_steps': self.steps,
            'okfac_m_aa': self.m_aa,
            'okfac_m_gg': self.m_gg,
            'okfac_Q_a': self.Q_a,
            'okfac_Q_g': self.Q_g,
            'okfac_d_a': self.d_a,
            'okfac_d_g': self.d_g,
            'okfac_momentum': self.momentum,
            'okfac_stat_decay': self.stat_decay,
            'okfac_lr': self.lr,
            'okfac_kl_clip': self.kl_clip,
            'okfac_damping': self.damping,
            'okfac_weight_decay': self.weight_decay,
            'okfac_fast_cnn': self.fast_cnn,
            'okfac_Ts': self.Ts,
            'okfac_Tf': self.Tf,
            'okfac_optim': self.optim.state_dict()  # inner SGD state
        })
        return state_dict


    def _save_input(self, module, input):
        '''Forward pre-hook: update the running input-covariance factor
        (A) for this layer, once every Ts steps.

        module: the layer the hook fired on
        input: tuple of inputs to the layer's forward
        '''
        if self.steps % self.Ts == 0:
            classname = module.__class__.__name__
            layer_info = None
            if classname == 'Conv2d':
                # Conv layers need kernel/stride/padding so compute_cov_a
                # can extract the receptive-field patches.
                layer_info = (module.kernel_size, module.stride,
                              module.padding)

            aa = compute_cov_a(input[0].data, classname, layer_info,
                               self.fast_cnn)

            # Initialize buffers on the very first step.
            if self.steps == 0:
                self.m_aa[module] = aa.clone()

            update_running_stat(aa, self.m_aa[module], self.stat_decay)

    def _save_grad_output(self, module, grad_input, grad_output):
        '''Backward hook: update the running output-gradient covariance
        factor (G) for this layer while acc_stats is enabled.

        module: the layer the hook fired on
        grad_input: gradients w.r.t. the layer inputs (unused)
        grad_output: gradients w.r.t. the layer outputs
        '''
        if self.acc_stats:
            classname = module.__class__.__name__
            layer_info = None
            if classname == 'Conv2d':
                layer_info = (module.kernel_size, module.stride,
                              module.padding)

            gg = compute_cov_g(grad_output[0].data, classname,
                               layer_info, self.fast_cnn)

            # Initialize buffers on the very first step.
            if self.steps == 0:
                self.m_gg[module] = gg.clone()

            update_running_stat(gg, self.m_gg[module], self.stat_decay)

    def _prepare_model(self):
        """Register the statistics hooks on every supported layer."""
        for module in self.model.modules():
            classname = module.__class__.__name__
            if classname in self.known_modules:
                # SplitBias must already have moved biases into separate
                # AddBias layers: K-FAC factors one parameter per module.
                assert not ((classname in ['Linear', 'Conv2d']) and module.bias is not None), \
                                    "You must have a bias as a separate layer"

                self.modules.append(module)
                # Collect input statistics on the forward pass ...
                module.register_forward_pre_hook(self._save_input)
                # ... and gradient statistics on the backward pass.
                module.register_full_backward_hook(self._save_grad_output)

    def step(self):
        '''Precondition the gradients with the factored curvature,
        rescale them to respect the KL trust region, then delegate the
        parameter update to the inner SGD optimizer.
        '''
        # Fold the L2 penalty into the raw gradients (no-op when
        # weight_decay == 0).
        if self.weight_decay > 0:
            for p in self.model.parameters():
                # Modern API: alpha is keyword-only; the old
                # add_(Number, Tensor) overload is deprecated/removed.
                p.grad.data.add_(p.data, alpha=self.weight_decay)

        updates = {}
        for i, m in enumerate(self.modules):
            # SplitBias guarantees each tracked layer holds exactly one
            # parameter (either a weight or a bias, never both).
            assert len(list(m.parameters())
                       ) == 1, "Can handle only one parameter at the moment"
            classname = m.__class__.__name__
            p = next(m.parameters())

            # Total diagonal regularization added to the curvature.
            la = self.damping + self.weight_decay

            # Refresh the eigendecompositions every Tf steps.
            if self.steps % self.Tf == 0:
                # My asynchronous implementation exists, I will add it later.
                # Experimenting with different ways to this in PyTorch.
                # A single eigh call per factor yields both eigenvalues
                # (ascending) and eigenvectors; calling eigvalsh and eigh
                # separately would decompose each matrix twice.
                self.d_a[m], self.Q_a[m] = torch.linalg.eigh(self.m_aa[m])
                self.d_g[m], self.Q_g[m] = torch.linalg.eigh(self.m_gg[m])

                # Zero out numerically negligible eigenvalues.
                self.d_a[m].mul_((self.d_a[m] > 1e-6).float())
                self.d_g[m].mul_((self.d_g[m] > 1e-6).float())

            if classname == 'Conv2d':
                # Flatten conv weights to (out_channels, in_ch * kh * kw).
                p_grad_mat = p.grad.data.view(p.grad.data.size(0), -1)
            else:
                p_grad_mat = p.grad.data

            # Apply (G + la I)^-1 grad (A + la I)^-1 in the eigenbases:
            # rotate in, divide by the outer sum of eigenvalues (+ la),
            # rotate back out.
            v1 = self.Q_g[m].t() @ p_grad_mat @ self.Q_a[m]
            v2 = v1 / (
                self.d_g[m].unsqueeze(1) * self.d_a[m].unsqueeze(0) + la)
            v = self.Q_g[m] @ v2 @ self.Q_a[m].t()

            v = v.view(p.grad.data.size())
            updates[p] = v

        # Rescale the update so the (approximate) KL change of the policy
        # stays within kl_clip.
        vg_sum = 0
        for p in self.model.parameters():
            if p not in updates:
                continue
            v = updates[p]
            vg_sum += (v * p.grad.data * self.lr * self.lr).sum()

        nu = min(1, math.sqrt(self.kl_clip / vg_sum))

        # Overwrite the gradients with the preconditioned direction.
        for p in self.model.parameters():
            if p not in updates:
                continue
            v = updates[p]
            p.grad.data.copy_(v)
            p.grad.data.mul_(nu)

        # The inner SGD performs the actual parameter update.
        self.optim.step()
        # One more completed optimization step.
        self.steps += 1
