import math

import torch
import torch.nn as nn
# Dimensionality of the optimizee's parameter vector.
DIM = 10
# Fixed per-coordinate random scaling for the objective f, drawn once at import time.
w = torch.empty(DIM)
torch.nn.init.uniform_(w,a=0.5,b=1.5)

def f(x):
    """Objective to be minimized: a summed random-coefficient quartic.

    Each coordinate is shifted and rescaled by the module-level random
    vector ``w`` before the quartic (z+1)(z+0.5)z(z-1) is evaluated and
    summed over all coordinates.
    """
    z = w * (x - 1)
    return ((z + 1) * (z + 0.5) * z * (z - 1)).sum()


def SGD(gradients, state, learning_rate=0.001):
    """Plain gradient-descent update rule.

    Returns the parameter increment ``-learning_rate * gradients`` together
    with the state passed straight through (SGD is stateless), matching the
    ``(update, state)`` optimizer interface used throughout this file.
    """
    step = gradients * (-learning_rate)
    return step, state


def RMS(gradients, state, learning_rate=0.1, decay_rate=0.9):
    """RMSProp update rule.

    Keeps an exponential moving average of squared gradients and scales the
    step inversely by its square root.

    Args:
        gradients: current gradient tensor.
        state: running average of squared gradients, or None on the first call.
        learning_rate: base step size.
        decay_rate: EMA decay for the squared-gradient average.

    Returns:
        (update, state): the parameter increment and the updated EMA.
    """
    if state is None:
        # Match the gradient's shape instead of the hard-coded global DIM,
        # so the rule works for problems of any dimensionality.
        state = torch.zeros_like(gradients)

    state = decay_rate * state + (1 - decay_rate) * torch.pow(gradients, 2)
    # 1e-5 guards against division by zero when the EMA is still ~0.
    update = -learning_rate * gradients / (torch.sqrt(state + 1e-5))
    return update, state


def Adam():
    """Tag function selecting the built-in Adam branch in learn().

    NOTE(review): torch.optim.Adam() requires a `params` argument, so
    actually calling this body raises TypeError.  It appears to be used only
    as a name tag passed to learn(), which builds its own
    torch.optim.Adam([x]) — presumably this body is dead code; confirm
    before relying on it.
    """
    return torch.optim.Adam()


# Hyper-parameters of the LSTM optimizer network.
Layers = 2          # number of stacked LSTM layers
Hidden_nums = 20    # LSTM hidden-state size
Input_DIM = DIM     # one input value per optimizee coordinate
Output_DIM = DIM    # one update value per optimizee coordinate
# "coordinate-wise" RNN
# NOTE(review): this module-level lstm/Linear pair duplicates the layers built
# inside LSTM_Optimizee_Model, and the second global_training below optimizes
# these globals — confirm that coupling is intentional.
lstm = torch.nn.LSTM(Input_DIM, Hidden_nums, Layers)
Linear = torch.nn.Linear(Hidden_nums, Output_DIM)
batchsize = 1

print(lstm)


class LSTM_Optimizee_Model(torch.nn.Module):
    """Learned LSTM optimizer.

    Maps the current gradient of the optimizee parameters to a parameter
    update via a stacked LSTM followed by a linear projection, in the spirit
    of "Learning to learn by gradient descent by gradient descent"
    (Andrychowicz et al., 2016).

    Args:
        input_size: LSTM input dimensionality (2 * DIM when the log-and-sign
            preprocessing is enabled, since it doubles the last dimension).
        output_size: dimensionality of the produced update.
        hidden_size: LSTM hidden-state size.
        num_stacks: number of stacked LSTM layers.
        batchsize: batch dimension used for the lazily created initial state.
        preprocess: apply the paper's log-and-sign gradient preprocessing.
        p: preprocessing parameter (p > 0); controls how small gradients are
            disregarded.
        output_scale: scalar applied to the raw projected output (paper trick
            to keep early updates small).
    """

    def __init__(self, input_size, output_size, hidden_size, num_stacks, batchsize, preprocess=True, p=10,
                 output_scale=1):
        super(LSTM_Optimizee_Model, self).__init__()
        self.preprocess_flag = preprocess
        self.p = p
        self.output_scale = output_scale
        self.batchsize = batchsize
        self.lstm = torch.nn.LSTM(input_size, hidden_size, num_stacks)
        self.Linear = torch.nn.Linear(hidden_size, output_size)

    def LogAndSign_Preprocess_Gradient(self, gradients):
        """Log-and-sign gradient preprocessing.

        Args:
          gradients: `Tensor` of gradients with shape `[d_1, ..., d_n]`.
        Returns:
          `Tensor` with shape `[d_1, ..., d_n-1, 2 * d_n]`. The first `d_n`
          elements along the last dimension are the clamped `log` channel in
          [-1, 1]; the remaining `d_n` elements are the `sign` channel.
        """
        p = self.p
        # log(0) = -inf is clamped to -1 below, so zero gradients are safe.
        log = torch.log(torch.abs(gradients))
        clamp_log = torch.clamp(log / p, min=-1.0, max=1.0)
        # BUG FIX: `torch.Tensor(p)` created an *uninitialized* tensor of
        # length p; the intended scale factor is the scalar e^p.
        clamp_sign = torch.clamp(math.exp(p) * gradients, min=-1.0, max=1.0)
        return torch.cat((clamp_log, clamp_sign), dim=-1)  # concatenate along the last (input) dim

    def Output_Gradient_Increment_And_Update_LSTM_Hidden_State(self, input_gradients, prev_state):
        """One LSTM step: gradients -> (update increment, next hidden state)."""
        if prev_state is None:  # lazily build the zero initial (h, c) state
            # Derive shapes from the model itself instead of module globals,
            # so the class is self-contained.
            prev_state = (torch.zeros(self.lstm.num_layers, self.batchsize, self.lstm.hidden_size),
                          torch.zeros(self.lstm.num_layers, self.batchsize, self.lstm.hidden_size))

        update, next_state = self.lstm(input_gradients, prev_state)

        # BUG FIX: project with the model's own Linear layer (the original
        # called the module-level `Linear`, leaving self.Linear unused and
        # untrained).  The LSTM emits hidden states; this maps them to the
        # output shape.
        update = self.Linear(update) * self.output_scale
        return update, next_state

    def forward(self, gradients, prev_state):
        """Compute a parameter update from a flat gradient vector.

        torch.nn.LSTM expects input of shape (seq_len, batch, input_dim), so
        a gradient of shape [DIM] is reshaped to [1, 1, DIM] and the result
        squeezed back to [DIM].
        """
        gradients = gradients.unsqueeze(0).unsqueeze(0)
        if self.preprocess_flag == True:
            gradients = self.LogAndSign_Preprocess_Gradient(gradients)

        update, next_state = self.Output_Gradient_Increment_And_Update_LSTM_Hidden_State(gradients, prev_state)

        # Squeeze back to a single flat vector: [1, 1, DIM] -> [DIM].
        update = update.squeeze().squeeze()
        return update, next_state


LSTM_Optimizee = LSTM_Optimizee_Model(Input_DIM * 2, Output_DIM, Hidden_nums, Layers, batchsize=1, )


# def LSTM_Optimizee(gradients, state):
#     # LSTM的输入为梯度，pytorch要求torch.nn.lstm的输入为（1，batchsize,input_dim）
#     # 原gradient.size()=torch.size[5] ->[1,1,5]
#     gradients = gradients.unsqueeze(0).unsqueeze(0)
#     if state is None:
#         state = (torch.zeros(Layers, batchsize, Hidden_nums),
#                  torch.zeros(Layers, batchsize, Hidden_nums))
#
#     update, state = lstm(gradients, state)  # 用optimizee_lstm代替 lstm
#     update = Linear(update)
#     # Squeeze to make it a single batch again.[1,1,5]->[5]
#     return update.squeeze().squeeze(), state


TRAINING_STEPS = 50  # unroll length of one learn() episode
# Shared starting point for all optimizers when reset_theta=False.
theta = torch.empty(DIM)
torch.nn.init.uniform_(theta, a=-1, b=1.0)
# NOTE(review): torch.tensor(tensor) copies theta but emits a UserWarning;
# theta.clone().detach().requires_grad_(True) is the recommended spelling.
theta_init = torch.tensor(theta, dtype=torch.float32, requires_grad=True)


def learn(optimizee, unroll_train_steps, retain_graph_flag=False, reset_theta=False):
    """Run one optimization episode of f() with the given update rule.

    Args:
        optimizee: a callable ``(gradients, state) -> (update, state)``
            (SGD / RMS / the LSTM model), or the ``Adam`` tag function, in
            which case a torch.optim.Adam instance is used directly.
        unroll_train_steps: number of optimization steps to unroll.
        retain_graph_flag: keep the autograd graph after each backward
            (needed when meta-training the LSTM optimizer). Defaults to
            freeing the graph after each backward pass.
        reset_theta: re-randomize the starting point instead of reusing the
            module-level ``theta_init``.

    Returns:
        (losses, global_loss_graph): per-step detached losses, and the sum of
        all step losses kept on the graph for the LSTM meta-optimizer.
    """
    if reset_theta == True:
        theta_new = torch.empty(DIM)
        torch.nn.init.uniform_(theta_new, a=-1, b=1.0)
        # BUG FIX: copy the freshly drawn theta_new — the original copied the
        # stale module-level `theta`, so reset_theta=True never actually
        # re-randomized the starting point.
        theta_init_new = torch.tensor(theta_new, dtype=torch.float32, requires_grad=True)
        x = theta_init_new
    else:
        x = theta_init

    global_loss_graph = 0  # accumulates the whole-episode loss graph for the LSTM meta-optimizer
    state = None
    x.requires_grad = True
    # BUG FIX: the original compared the *function object* to the string
    # 'Adam' (always unequal), so learn(Adam, ...) crashed by calling
    # Adam(x_grad, state). Dispatch on the callable's __name__ instead;
    # nn.Module instances have no __name__ and fall through to this branch.
    if getattr(optimizee, '__name__', None) != 'Adam':
        losses = []
        for i in range(unroll_train_steps):
            loss = f(x)
            global_loss_graph += loss
            # create_graph=True so the meta-optimizer can differentiate
            # through this gradient computation itself.
            x_grad = torch.autograd.grad(outputs=loss, inputs=x,
                                         grad_outputs=torch.ones(loss.size()),
                                         retain_graph=retain_graph_flag,
                                         create_graph=True, only_inputs=True)[0]
            update, state = optimizee(x_grad, state)
            losses.append(loss.detach_())
            x = x + update

        return losses, global_loss_graph

    else:
        losses = []
        x.requires_grad = True
        optimizee = torch.optim.Adam([x], lr=0.1)

        for i in range(unroll_train_steps):
            optimizee.zero_grad()
            loss = f(x)

            global_loss_graph += loss

            loss.backward(retain_graph=retain_graph_flag)
            optimizee.step()
            losses.append(loss.detach_())
        return losses, global_loss_graph


Global_Train_Steps = 100


def global_training(optimizee):
    """Meta-train the LSTM optimizer.

    Repeatedly unrolls learn() for TRAINING_STEPS steps, sums the per-step
    losses into one graph, and back-propagates that sum into the optimizer's
    parameters with an outer Adam optimizer.

    Args:
        optimizee: an nn.Module implementing (gradients, state) -> (update, state).
    Returns:
        list of the detached summed episode losses, one per outer iteration.
    """
    global_loss_list = []
    adam_global_optimizer = torch.optim.Adam(optimizee.parameters(), lr=0.0001)
    # One warm-up episode from a fresh random start; its summed loss is only
    # printed to show the initial loss level.
    _, global_loss_1 = learn(optimizee, TRAINING_STEPS, retain_graph_flag=True, reset_theta=True)
    print(global_loss_1)
    for i in range(Global_Train_Steps):
        _, global_loss = learn(optimizee, TRAINING_STEPS, retain_graph_flag=True, reset_theta=False)
        adam_global_optimizer.zero_grad()

        # print(i,global_loss)
        global_loss.backward()  # the same fixed graph is optimized each time; its buffers must not be freed
        # print('xxx',[(z,z.requires_grad) for z in optimizee.parameters()  ])
        adam_global_optimizer.step()
        # print('xxx',[(z.grad,z.requires_grad) for z in optimizee.parameters()  ])
        global_loss_list.append(global_loss.detach_())

    print(global_loss)
    return global_loss_list


# Keep the graph construction inside the function body; assigning the graph
# out directly would lose it.  Meta-train the LSTM optimizee.
global_loss_list = global_training(LSTM_Optimizee)


# def learn(optimizee, unroll_train_steps, retain_graph_flag=False, reset_theta=False):
#     """retain_graph_flag=False   默认每次loss_backward后 释放动态图
#     #  reset_theta = False     默认每次学习前 不随机初始化参数"""
#
#     if reset_theta == True:
#         theta_new = torch.empty(DIM)
#         torch.nn.init.uniform_(theta_new, a=-1, b=1.0)
#         theta_init_new = torch.tensor(theta, dtype=torch.float32, requires_grad=True)
#         x = theta_init_new
#     else:
#         x = theta_init
#
#     global_loss_graph = 0  # 这个是为LSTM优化器求所有loss相加产生计算图准备的
#     state = None
#
#     if optimizee.__name__ != 'Adam':
#         losses = []
#         for i in range(unroll_train_steps):
#
#             loss = f(x)
#
#             # global_loss_graph += torch.exp(torch.Tensor([-i/20]))*loss
#             # global_loss_graph += (0.8*torch.log10(torch.Tensor([i+1]))+1)*loss
#             global_loss_graph += loss
#
#             #loss.backward(retain_graph=retain_graph_flag)  # 默认为False,当优化LSTM设置为True
#             x_grad=torch.autograd.grad(outputs=loss,inputs=x,grad_outputs=torch.ones(loss.size()),retain_graph=retain_graph_flag,create_graph=True,only_inputs=True)[0]
#             update, state = optimizee(x_grad, state)
#
#             losses.append(loss.detach_())
#
#             x=x+update
#             # x = x.detach_()
#             # 这个操作 直接把x中包含的图给释放了，
#             # 那传递给下次训练的x从子节点变成了叶节点，那么梯度就不能沿着这个路回传了，
#             # 之前写这一步是因为这个子节点在下一次迭代不可以求导，那么应该用x.retain_grad()这个操作，
#             # 然后不需要每次新的的开始给x.requires_grad = True
#
#             #x.retain_grad()
#             # print(x.retain_grad())
#
#         # print(x)
#         return losses, global_loss_graph
#
#     else:
#         losses = []
#         x.requires_grad = True
#         optimizee = torch.optim.Adam([x], lr=0.1)
#
#         for i in range(unroll_train_steps):
#             optimizee.zero_grad()
#             loss = f(x)
#             global_loss_graph += loss
#
#             loss.backward(retain_graph=retain_graph_flag)
#             optimizee.step()
#             losses.append(loss.detach_())
#         # print(x)
#         return losses, global_loss_graph


def global_training(optimizee):
    """Second meta-training loop (NOTE: redefines global_training above).

    NOTE(review): the `optimizee` argument is only used to harvest parameters
    for the outer Adam optimizer; the inner learn() call is hard-wired to the
    global LSTM_Optimizee.  The caller below passes the module-level `lstm`,
    so this optimizes lstm/Linear parameters against a loss produced by a
    *different* model — confirm this coupling is intended before changing it.
    """
    global_loss_list = []
    adam_global_optimizer = torch.optim.Adam([{'params': optimizee.parameters()}, {'params': Linear.parameters()}],
                                             lr=0.0001)

    for i in range(Global_Train_Steps):
        loss_list, global_loss = learn(LSTM_Optimizee, TRAINING_STEPS, retain_graph_flag=True, reset_theta=False)
        adam_global_optimizer.zero_grad()
        # print(loss_list)
        # print(i,global_loss)
        global_loss.backward()  # the same fixed graph is optimized each time; its buffers must not be freed
        #print('xxx',[(z,z.requires_grad) for z in optimizee.parameters()  ])
        adam_global_optimizer.step()
        #print('xxx',[(z.grad,z.requires_grad) for z in optimizee.parameters()  ])
        global_loss_list.append(global_loss.detach_())
    return global_loss_list


# Keep the graph construction inside the function body; assigning the graph
# out directly would lose it.  Second meta-training pass (see the review
# note on the function above about which parameters this actually trains).
global_loss_list = global_training(lstm)

import matplotlib
import matplotlib.pyplot as plt
import numpy as np

# Benchmark: run each update rule for TRAINING_STEPS steps from a fresh random
# starting point and plot the per-step losses side by side.
T = np.arange(TRAINING_STEPS)
for _ in range(1):
    sgd_losses, sgd_sum_loss = learn(SGD, TRAINING_STEPS, reset_theta=True)
    rms_losses, rms_sum_loss = learn(RMS, TRAINING_STEPS, reset_theta=True)
    # NOTE(review): learn() is meant to special-case Adam by name; verify its
    # dispatch actually matches this tag function before trusting this row.
    adam_losses, adam_sum_loss = learn(Adam, TRAINING_STEPS, reset_theta=True)
    lstm_losses,lstm_sum_loss=learn(LSTM_Optimizee,TRAINING_STEPS,reset_theta=True)
    p1, = plt.plot(T, sgd_losses, label='SGD')
    p2, = plt.plot(T, rms_losses, label='RMS')
    p3, = plt.plot(T, adam_losses, label='Adam')
    p4,=plt.plot(T,lstm_losses,label='LSTM')
    plt.legend(handles=[p1, p2, p3,p4])
    plt.title('Losses')
    plt.show()
    print("sum_loss:sgd={},rms={},adam={},lstm={}".format(sgd_sum_loss, rms_sum_loss, adam_sum_loss,lstm_sum_loss))