import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as Data
from torch.optim.optimizer import Optimizer
import math
import numpy as np
import matplotlib.pyplot as plt


epoch_ci = 500   # number of training epochs
LR = 0.01
N_EPOCHS = epoch_ci     # number of training epochs (alias of epoch_ci)
BATCH_SIZE = 20     # training batch size

# One x-axis point per epoch; the ys* lists are filled during training and
# plotted against xs at the end (their lengths must match len(xs)).
xs = [i for i in range(epoch_ci)]  # x-axis points; count equals the number of plotted points
ysAdam = []
ysMomentum = []
ysRadam = []
ysSGD = []

def drawPlot(xs, ysAdam, ysMomentum, ysRadam):
    """Plot the three optimizer loss curves against a shared epoch axis."""
    curves = (
        (ysAdam, "red", "loss Adam"),
        (ysMomentum, "blue", "loss Momentum"),
        (ysRadam, "black", "loss Radam"),
    )
    for series, color, text in curves:
        plt.plot(xs, series, color, label=text)
    plt.legend()           # legend box naming each curve
    plt.xlabel("epoch")    # axis titles
    plt.ylabel("loss")
    plt.show()


# RAdam算法
# RAdam algorithm
class RAdam(Optimizer):
    """Rectified Adam (RAdam) optimizer.

    Implements the variance-rectification scheme of Liu et al., "On the
    Variance of the Adaptive Learning Rate and Beyond" (2019): while the
    approximated SMA length (``N_sma``) is below 5 the adaptive term's
    variance is too high, so the step either falls back to plain momentum
    SGD (when ``degenerated_to_sgd``) or is skipped entirely.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=False):
        """
        Args:
            params: iterable of parameters, or of parameter-group dicts.
            lr: learning rate (must be >= 0).
            betas: coefficients for the running first / second moment averages.
            eps: term added to the denominator for numerical stability.
            weight_decay: weight-decay factor applied directly to the weights.
            degenerated_to_sgd: if True, take a plain SGD-with-momentum step
                while the rectification term is undefined (N_sma < 5);
                otherwise skip the update for those early steps.
        """
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))

        self.degenerated_to_sgd = degenerated_to_sgd
        # Each group caches (step, N_sma, step_size) for 10 consecutive steps;
        # groups with betas different from the defaults get their own cache so
        # the cached values are never shared across incompatible betas.
        if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
            for param in params:
                if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
                    param['buffer'] = [[None, None, None] for _ in range(10)]
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        buffer=[[None, None, None] for _ in range(10)])
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.
        Returns:
            The loss returned by ``closure``, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                p_data_fp32 = p.data.float()

                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Update biased first / second moment estimates.
                # FIX: the deprecated Number-first overloads
                # (`addcmul_(scalar, t1, t2)`, `add_(scalar, t)`) were removed
                # from PyTorch; use the `value=` / `alpha=` keyword forms.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1
                # N_sma and step_size depend only on the step count, so they
                # are computed once per step index (mod 10) and reused for
                # every parameter in the group.
                buffered = group['buffer'][int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma

                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        step_size = math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                                        N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    elif self.degenerated_to_sgd:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    else:
                        step_size = -1  # sentinel: skip the update this step
                    buffered[2] = step_size

                # more conservative since it's an approximated value
                if N_sma >= 5:
                    # Rectified adaptive step.
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
                    p.data.copy_(p_data_fp32)
                elif step_size > 0:
                    # SGD-with-momentum fallback (only when degenerated_to_sgd).
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                    p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
                    p.data.copy_(p_data_fp32)

        return loss


class MyDataset(torch.utils.data.Dataset):      # input data; change the column split for other data
    """Map-style dataset over a whitespace-separated text file.

    Each row of the file is one sample: the first 32 columns are the input
    X and the remaining columns (5 for the file used here) are the target Y.
    """

    def __init__(self, data_path):
        """Load the whole file at *data_path* into memory."""
        self.data_path = data_path
        self.load_data(data_path)

    def load_data(self, data_path):
        """Parse the file and split it into X (first 32 cols) and Y (rest)."""
        data = np.loadtxt(data_path)
        self.size = data.shape[0]      # number of samples (rows)
        self.X = data[:, :32]          # input features: first 32 columns
        self.Y = data[:, 32:]          # targets: remaining columns

    def __len__(self):
        return self.size

    # FIX: removed the broken `__iter__` that returned `self` without a
    # `__next__` method — calling next() on it raised TypeError. Without it,
    # Python's default sequence protocol iterates via __getitem__/IndexError,
    # and DataLoader uses __getitem__/__len__ directly for map-style datasets.

    def __getitem__(self, index):
        # Return the raw (input, target) pair for one sample.
        return self.X[index, :], self.Y[index]

# Load the training file into the dataset wrapper defined above.
train_set = MyDataset('./data/data_xun32.txt')    # hard-coded path; file must exist at import time

train_loader = torch.utils.data.DataLoader(
        dataset=train_set,
        batch_size=BATCH_SIZE,
        shuffle=True)


class MLP(nn.Module):
    """Small regression MLP: 32 input features -> 5 outputs, split into an
    encoder (32 -> 16 -> 32) and a decoder (32 -> 16 -> 5) stage, with
    BatchNorm + ReLU after every hidden Linear layer."""

    def __init__(self):
        super(MLP, self).__init__()
        # Encoder stage: each (in, out) pair becomes Linear + BatchNorm + ReLU.
        stages = []
        for n_in, n_out in ((32, 16), (16, 32)):
            stages.append(nn.Linear(n_in, n_out))
            stages.append(nn.BatchNorm1d(n_out))
            stages.append(nn.ReLU(inplace=True))
        self.encoder = nn.Sequential(*stages)
        # Decoder stage: one hidden layer, then the 5-unit output head.
        self.decoder = nn.Sequential(
            nn.Linear(32, 16),
            nn.BatchNorm1d(16),
            nn.ReLU(inplace=True),
            nn.Linear(16, 5),
        )

    def forward(self, x):
        """Map a (batch, 32) input through encoder then decoder to (batch, 5)."""
        return self.decoder(self.encoder(x))


def main():
    """Train three identical MLPs with Adam / SGD+momentum / RAdam, record
    each optimizer's per-epoch mean loss, save the final weights, and plot
    the loss curves for comparison.

    Side effects: appends to the module-level ysAdam/ysMomentum/ysRadam
    lists, writes .pth files under logs/, and opens a matplotlib window.
    """
    # All four models/optimizers are built (in the original order) even though
    # plain SGD is never trained — this preserves the RNG consumption pattern.
    modelSGD = MLP()
    modelMomentum = MLP()
    modelAdam = MLP()
    modelRAdam = MLP()

    opt_SGD = optim.SGD(modelSGD.parameters(), lr=LR)
    opt_Momentum = optim.SGD(modelMomentum.parameters(), lr=LR, momentum=0.9)
    opt_Adam = optim.Adam(modelAdam.parameters(), lr=LR, betas=(0.9, 0.99))
    opt_RAdam = RAdam(modelRAdam.parameters(), lr=LR, weight_decay=0)

    def _train(model, optimizer, sink):
        # Train one model for N_EPOCHS and append one mean-loss value per
        # epoch to `sink`, so each curve has exactly len(xs) points.
        # (The original appended one loss *tensor* per batch: the list length
        # mismatched xs, and keeping live tensors retained every iteration's
        # autograd graph — a memory leak.)
        for _ in range(N_EPOCHS):
            batch_losses = []
            for x, y in train_loader:
                x = x.view(x.size(0), -1)
                pred = model(x.float())
                loss = F.smooth_l1_loss(pred, y.float())
                # .item() detaches the scalar from the graph before storing.
                batch_losses.append(loss.item())
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            sink.append(sum(batch_losses) / len(batch_losses))

    _train(modelAdam, opt_Adam, ysAdam)
    _train(modelMomentum, opt_Momentum, ysMomentum)
    _train(modelRAdam, opt_RAdam, ysRadam)

    # Save final weights (the original relied on the leaked loop variable
    # `epoch`; epoch + 1 == N_EPOCHS at that point, so the paths are the same).
    print("Saving state, iter:", str(N_EPOCHS))
    torch.save(modelAdam.state_dict(), 'logs/Adam32_Epoch%d.pth' % N_EPOCHS)
    print("Saving state, iter:", str(N_EPOCHS))
    torch.save(modelMomentum.state_dict(), 'logs/Momentum32_Epoch%d.pth' % N_EPOCHS)
    print("Saving state, iter:", str(N_EPOCHS))
    torch.save(modelRAdam.state_dict(), 'logs/RAdam32_Epoch%d.pth' % N_EPOCHS)

    drawPlot(xs, ysAdam, ysMomentum, ysRadam)


# Run the optimizer comparison when executed as a script.
if __name__ == '__main__':
    main()

