import os

import torch
from torch import nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import Dataset

import matplotlib.pyplot as plt


# Hyper-parameter configuration for the experiment.  The inline lists show
# the alternatives tried during tuning; swap the active value to re-run with
# a different setting.  Optimizer-specific keys are only read by the matching
# branch in main() (e.g. "nesterov"/"momentum" for SGD, "betas*" for Adam).
params = {
    "batch_size": 128,
    "lr": 0.1,
    "epochs": 80,
    "criterion": nn.MSELoss(),  # [nn.MSELoss(), nn.L1Loss(), nn.SmoothL1Loss()]
    "activation": nn.ReLU(),  # [nn.ReLU(), nn.ReLU6(), nn.LeakyReLU(), nn.Sigmoid(), nn.Tanh(), nn.ELU()]
    "optimizer": "Adam",  # ["SGD", "Adagrad", "RMSprop", "Adam"]
    "nesterov": False,  # [True, False]
    "momentum": 0,
    "weight_decay": 0,
    "eps": 1e-8,
    "alpha": 0.9,  # RMSprop smoothing constant
    "betas0": 0.9,  # Adam beta1
    "betas1": 0.99,  # Adam beta2
    "lr_decay": 0,  # Adagrad learning-rate decay
}

# Prefer the GPU when one is available; the model and all batches are moved
# onto this device before use.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device: {}'.format(device))


class MyDataset(Dataset):
    """In-memory dataset over pre-built feature/target sequences.

    Stores references to the given containers (no copy) and serves
    aligned (feature, target) pairs by index.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __len__(self):
        # Length is defined by the feature container.
        return len(self.x)

    def __getitem__(self, index):
        sample = self.x[index]
        label = self.y[index]
        return sample, label


class MyNet(nn.Module):
    """Small fully connected regressor: 2 -> 10 -> 5 -> 1.

    The hidden-layer activation is taken from the module-level ``params``
    dict at construction time; the scalar output is flattened to 1-D.
    """

    def __init__(self):
        super(MyNet, self).__init__()
        self.activation = params["activation"]
        self.fc1 = nn.Linear(2, 10)
        self.fc2 = nn.Linear(10, 5)
        self.fc3 = nn.Linear(5, 1)

    def forward(self, x):
        hidden = self.activation(self.fc1(x))
        hidden = self.activation(self.fc2(hidden))
        out = self.fc3(hidden)
        # (N, 1) -> (N,) so the output matches the 1-D targets.
        return torch.flatten(out)


def train(model, train_loader, optimizer, criterion):
    """Run one training epoch.

    Args:
        model: network to optimize (already moved to its target device).
        train_loader: DataLoader yielding (data, target) batches.
        optimizer: optimizer over model.parameters().
        criterion: loss module returning a per-batch MEAN loss.

    Returns:
        The mean per-sample training loss over the whole epoch.
    """
    model.train()
    # Derive the device from the model itself rather than a module global.
    device = next(model.parameters()).device
    train_loss = 0.0
    for data, target in train_loader:
        data = data.to(device)
        target = target.to(device)

        output = model(data)
        loss = criterion(output, target)
        # criterion returns a batch MEAN, so weight it by the batch size;
        # dividing the sum of batch means by len(dataset) (as the original
        # did) under-reports the loss by ~1/batch_size and mis-weights a
        # smaller final batch.
        train_loss += loss.item() * data.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    train_loss /= len(train_loader.dataset)
    return train_loss


def val(model, val_loader, criterion):
    """Evaluate the model on the validation set without updating weights.

    Args:
        model: network to evaluate (already moved to its target device).
        val_loader: DataLoader yielding (data, target) batches.
        criterion: loss module returning a per-batch MEAN loss.

    Returns:
        The mean per-sample validation loss.
    """
    model.eval()
    # Derive the device from the model itself rather than a module global.
    device = next(model.parameters()).device
    val_loss = 0.0
    with torch.no_grad():
        for data, target in val_loader:
            data = data.to(device)
            target = target.to(device)

            output = model(data)
            # Weight the batch-mean loss by batch size so dividing by the
            # dataset size below yields a true per-sample mean (the original
            # summed batch means, biasing the result).
            val_loss += criterion(output, target).item() * data.size(0)

    val_loss /= len(val_loader.dataset)
    return val_loss


def pred(test_loader, criterion):
    """Load the best checkpoint and evaluate it on the test set.

    Args:
        test_loader: DataLoader yielding (data, target) batches.
        criterion: loss module returning a per-batch MEAN loss.

    Returns:
        The mean per-sample test loss of the best saved model.
    """
    print("Loading the best model parameters...")
    model = MyNet().to(device)
    # map_location lets a checkpoint saved on GPU load on a CPU-only host.
    model.load_state_dict(torch.load("./ckpt/best.pth", map_location=device))
    model.eval()
    test_loss = 0.0
    with torch.no_grad():
        for data, target in test_loader:
            data = data.to(device)
            target = target.to(device)

            output = model(data)
            # Weight the batch-mean loss by batch size so dividing by the
            # dataset size below yields a true per-sample mean (the original
            # summed batch means, biasing the result).
            test_loss += criterion(output, target).item() * data.size(0)

    test_loss /= len(test_loader.dataset)
    return test_loss


def main():
    """Build the network, generate the dataset, then train, validate and test.

    Saves the checkpoint with the lowest validation loss to ./ckpt/best.pth,
    plots the per-epoch loss curves, and reports the test MSE of that best
    checkpoint.
    """
    # Generate 5000 samples: 2500 with both features in (0, 10] and 2500
    # with both in [-10, 0).  Target is the quadratic form x0^2 + x0*x1 + x1^2.
    pos_x = torch.rand(2500, 2) * 10
    neg_x = 0 - torch.rand(2500, 2) * 10
    # Interleave positive/negative chunks so every split contains both regions.
    x = torch.cat((pos_x[:2000], neg_x[:2000], pos_x[2000:2250], neg_x[2000:2250], pos_x[2250:], neg_x[2250:]))
    y = x[:, 0] ** 2 + x[:, 0] * x[:, 1] + x[:, 1] ** 2

    # Split the data: 80% train, 10% validation, 10% test.
    train_dataset = MyDataset(x[:4000], y[:4000])
    val_dataset = MyDataset(x[4000:4500], y[4000:4500])
    test_dataset = MyDataset(x[4500:], y[4500:])

    # Wrap the splits in loaders; only the training set is shuffled.
    train_loader = DataLoader(train_dataset, batch_size=params["batch_size"], shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=params["batch_size"], shuffle=False)
    test_loader = DataLoader(test_dataset, batch_size=params["batch_size"], shuffle=False)

    # Build the network and move it to the compute device.
    model = MyNet()
    model = model.to(device)

    # Training criterion: configurable via params.
    criterion = params["criterion"]
    criterion = criterion.to(device)
    # Validation/test criterion: fixed to MSE so runs stay comparable.
    criterion_test = nn.MSELoss()
    criterion_test = criterion_test.to(device)

    # Select the optimizer named in params.
    if params["optimizer"] == "SGD":
        optimizer = optim.SGD(model.parameters(), lr=params["lr"], momentum=params["momentum"], weight_decay=params["weight_decay"], nesterov=params["nesterov"])
    elif params["optimizer"] == "Adagrad":
        optimizer = optim.Adagrad(model.parameters(), lr=params["lr"], lr_decay=params["lr_decay"], weight_decay=params["weight_decay"])
    elif params["optimizer"] == "RMSprop":
        optimizer = optim.RMSprop(model.parameters(), lr=params["lr"], alpha=params["alpha"], eps=params["eps"], weight_decay=params["weight_decay"], momentum=params["momentum"])
    elif params["optimizer"] == "Adam":
        # lr should not be too small: 0.01 is a baseline, 0.001 converges slowly early on.
        optimizer = optim.Adam(model.parameters(), lr=params["lr"], betas=(params["betas0"], params["betas1"]), eps=params["eps"], weight_decay=params["weight_decay"])
    else:
        raise Exception("Optimizer Undefined")

    # Make sure the checkpoint directory exists before the first save.
    os.makedirs("./ckpt", exist_ok=True)

    train_loss_list = []
    val_loss_list = []
    epoch_list = []
    # Track the lowest validation loss seen so far.  Start at +inf so the
    # first epoch always produces a checkpoint.  (The original initialized
    # best to 1 and never updated it, so any epoch with val_loss < 1
    # overwrote the checkpoint -- including epochs worse than earlier ones --
    # and nothing was ever saved if val_loss never dropped below 1.)
    best = float("inf")
    # Train the network.
    for epoch in range(params["epochs"]):
        train_loss = train(model, train_loader, optimizer, criterion)
        val_loss = val(model, val_loader, criterion_test)

        epoch_list.append(epoch + 1)
        train_loss_list.append(train_loss)
        val_loss_list.append(val_loss)

        print("Epoch {:0>2d}: Training loss {:.4f}, Val loss {:.4f}".format(epoch + 1, train_loss, val_loss))

        # Save the model parameters with the smallest validation loss so far.
        if val_loss < best:
            best = val_loss
            torch.save(model.state_dict(), './ckpt/best.pth')

    # Plot the training and validation loss curves.
    plt.figure()
    plt.title("Loss for Each Epoch")
    plt.grid(True)
    plt.plot(epoch_list, train_loss_list, color='red', label='Training Loss')
    plt.plot(epoch_list, val_loss_list, color='blue', label='Validating Loss')
    plt.legend()
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.ylim(0, 10)
    plt.show()

    # Test with the best checkpoint.
    test_loss = pred(test_loader, criterion_test)
    print("Test MSELoss: {:.4f}".format(test_loss))


# Script entry point: run the full train/validate/test pipeline.
if __name__ == '__main__':
    main()
