#  Imports and argument definitions
import argparse
import os
import sys

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.utils.data as tud
from tqdm import tqdm

from network import MLP


def parse_option():
    """Parse command-line options for the MLP trainer.

    Returns:
        argparse.Namespace with `epochs` (int, default 3) and
        `save_path` (str, default '../../models/MLP.pth').
    """
    parser = argparse.ArgumentParser('MLP')
    parser.add_argument(
        '--epochs',
        type=int,
        default=3,
        help='the number of epoch',
    )
    parser.add_argument(
        '--save_path',
        type=str,
        default='../../models/MLP.pth',
        help='the path of model saved',
    )
    # Alternatively: args, _ = parser.parse_known_args()
    return parser.parse_args()


def main(args):
    """Train an MLP classifier on a synthetic 3-class 2-D Gaussian dataset.

    Builds three Gaussian clusters, splits them 80/20 into train/val,
    trains for `args.epochs` epochs with SGD + StepLR, and saves the
    best-validation-accuracy weights to `args.save_path`.

    Args:
        args: namespace with `epochs` (int) and `save_path` (str).
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    # Build the synthetic dataset (hand-crafted, seeded for reproducibility).
    torch.manual_seed(1)
    n_data = torch.ones(1000, 2)  # template tensor: 1000 rows x 2 columns
    x0 = torch.normal(1 * n_data, 1)   # class 0: cluster around (1, 1)
    y0 = torch.zeros(1000)
    x1 = torch.normal(-4 * n_data, 1)  # class 1: cluster around (-4, -4)
    y1 = torch.ones(1000)
    x2 = torch.normal(5 * n_data, 1)   # class 2: cluster around (5, 5)
    y2 = torch.ones(1000) * 2
    x = torch.cat((x0, x1, x2)).type(torch.FloatTensor)
    y = torch.cat((y0, y1, y2)).type(torch.LongTensor)
    torch_dataset = tud.TensorDataset(x, y)

    # Visual sanity check of the dataset (blocks until the window is closed).
    plt.scatter(x[:, 0], x[:, 1], c=y, s=100, lw=0, cmap='RdYlGn')
    plt.show()

    # 80/20 train/validation split.
    train_size = int(len(torch_dataset) * 0.8)
    val_size = len(torch_dataset) - train_size
    train_dataset, val_dataset = tud.random_split(torch_dataset, [train_size, val_size])
    train_loader = tud.DataLoader(dataset=train_dataset,
                                  batch_size=32,
                                  shuffle=True, )
    # No shuffle for validation: order does not affect the accuracy metric.
    val_loader = tud.DataLoader(dataset=val_dataset,
                                batch_size=32,
                                shuffle=False, )
    print("using {}  for training, {}   for validation.".format(train_size, val_size))

    net = MLP()
    net.to(device)

    optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
    # LR decay schedule: multiply lr by 0.1 every 5 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
    criterion = nn.CrossEntropyLoss()

    best_acc = 0.0
    for epoch in range(args.epochs):
        # ---- train ----
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader, file=sys.stdout)
        for train_x, labels in train_bar:
            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(train_x.to(device))
            loss = criterion(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        scheduler.step()  # advance the learning-rate schedule once per epoch
        # NOTE: the original `if step % 2000 == 1999` report never fired —
        # this dataset yields only ~75 batches/epoch. Report per-epoch average.
        print(f'[epoch {epoch + 1}] train loss: {running_loss / max(len(train_loader), 1):.3f}')

        # ---- validate ----
        net.eval()
        acc = 0.0  # number of correct predictions this epoch
        with torch.no_grad():
            val_bar = tqdm(val_loader, file=sys.stdout)
            for val_x, val_labels in val_bar:
                outputs = net(val_x.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_size
        print(f'[epoch {epoch+1}] , val_accuracy: {val_accurate:.3f}')
        if val_accurate > best_acc:
            best_acc = val_accurate
            # Create the target directory if missing so torch.save cannot fail
            # with FileNotFoundError on a fresh checkout.
            save_dir = os.path.dirname(args.save_path)
            if save_dir:
                os.makedirs(save_dir, exist_ok=True)
            torch.save(net.state_dict(), args.save_path)
    print('Finished Training')


if __name__ == '__main__':
    # Parse CLI options and hand them to the trainer.
    main(parse_option())
