import torch
from torch.utils.data import TensorDataset, DataLoader
import torch.nn.functional as F
import torch.nn as nn
import matplotlib.pyplot as plt

# Fix the RNG seed so the synthetic data and the weight initialisation of
# every network below are reproducible across runs.
torch.manual_seed(111)

# Tunable hyperparameters.
# NOTE(review): LR appears unused — each optimizer below hard-codes its own
# learning rate; kept here for reference only.
LR = 0.02
BATCH_SIZE = 8
EPOCHS = 200

# Synthetic regression data: y = x^2 plus Gaussian noise with std 0.08.
x = torch.unsqueeze(torch.linspace(-1,1,100), dim = 1)  # unsqueeze() adds a feature dimension -> shape (100, 1)
y = x.pow(2) + 0.08*torch.normal(mean=torch.zeros(x.size()))        # x^2 + 0.08 * standard-normal noise (std defaults to 1)

# Preview the generated data:
# plt.scatter(x.numpy(), y.numpy())
# plt.show()

# Wrap the tensors so the loader can shuffle and batch them each epoch.
dataset = TensorDataset(x, y)
data_loader = DataLoader(
        dataset,
        batch_size = BATCH_SIZE,
        shuffle = True,
        num_workers = 0)


class NetWork(nn.Module):

    def __init__(self):
        super(NetWork, self).__init__()
        self.hidden = nn.Linear(1, 8)
        self.output = nn.Linear(8, 1)

    def forward(self, x):
        output = F.relu(self.hidden(x))  # 经过一个隐藏层后再用ReLU激活
        output = self.output(output)
        return output


# One independent model instance per optimizer under test.  net2/net5/net6/net8
# belonged to optimizers (Adagrad, SparseAdam/Adamax, ASGD, Rprop) that are
# currently disabled; unpacking a generator creates the nets left-to-right,
# preserving the original weight-initialisation RNG order.
net1, net3, net4, net7, net9 = (NetWork() for _ in range(5))

# Gather the active models so the training loop can iterate over them.
networks = [net1, net3, net4, net7, net9]

# One optimizer instance per model.  Adagrad / SparseAdam / Adamax / ASGD /
# LBFGS / Rprop were part of an earlier 9-way comparison and are disabled.
Adadelta = torch.optim.Adadelta(net1.parameters(), lr=1, rho=0.9, eps=1e-6, weight_decay=0)
Adam = torch.optim.Adam(net3.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False)
AdamW = torch.optim.AdamW(net4.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.01, amsgrad=False)
RMSprop = torch.optim.RMSprop(net7.parameters(), lr=0.01, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False)
SGD = torch.optim.SGD(net9.parameters(), lr=0.001, momentum=0, dampening=0, weight_decay=0, nesterov=False)

# Order matches `networks` above and the `labels` used when plotting.
optimizers = [Adadelta, Adam, AdamW, RMSprop, SGD]

loss_func = torch.nn.MSELoss()    # mean-squared error, suitable for regression

# One loss-history list per network, built from `networks` itself so the two
# sequences can never fall out of sync if the set of candidates changes.
loss_lists = [[] for _ in networks]

for epoch in range(EPOCHS):
    for b_x, b_y in data_loader:
        # Train every candidate on the same mini-batch so the recorded
        # loss curves are directly comparable.
        for network, optimizer, loss_list in zip(networks, optimizers, loss_lists):
            optimizer.zero_grad()
            output = network(b_x)
            loss = loss_func(output, b_y)
            loss.backward()
            optimizer.step()
            # .item() detaches and returns a plain Python float; the original
            # `loss.data.numpy()` relied on the deprecated/unsafe `.data`
            # attribute and stored 0-d numpy arrays instead of floats.
            loss_list.append(loss.item())

# Legend labels, aligned index-for-index with `loss_lists` / `optimizers`.
labels = ['Adadelta','Adam','AdamW','RMSprop','SGD']

plt.figure(1, figsize=(10, 7))
# Plot every 32nd recorded loss value; raise the stride for a sparser curve.
for label, history in zip(labels, loss_lists):
    plt.plot(history[::32], label=label)
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.ylim(0, 0.4)
plt.show()

