from __future__ import print_function
import argparse  # Python command-line argument parsing
import os

import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from tqdm import tqdm
import numpy as np
import models
from nodes import DatasetSplit


def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch over `train_loader`.

    Args:
        args: parsed CLI namespace (currently unused here; kept for a
            consistent train/test signature).
        model: the network to train (moved to `device` by the caller).
        device: torch.device to place each batch on.
        train_loader: DataLoader yielding (data, target) batches.
        optimizer: optimizer stepping `model`'s parameters.
        epoch: current epoch number (unused; kept for interface parity).
    """
    model.train()
    total_loss = 0.0
    correct = 0
    description = "loss={:.4f} acc={:.2f}%"
    # Hoist the dataset size out of the loop; the running accuracy is
    # measured against the full epoch size, so it climbs toward the
    # final epoch accuracy as batches accumulate.
    dataset_size = len(train_loader.dataset)

    with tqdm(train_loader) as progress:
        for batch_idx, (data, target) in enumerate(progress):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = F.cross_entropy(output, target)
            loss.backward()
            optimizer.step()

            total_loss += loss.detach().item()
            avg_loss = total_loss / (batch_idx + 1)
            pred = output.argmax(dim=1)
            correct += pred.eq(target.view_as(pred)).sum().item()
            acc = correct / dataset_size * 100
            # Update AFTER computing this batch's stats; the original
            # updated before, so the bar always lagged one batch behind.
            progress.set_description(description.format(avg_loss, acc))


def split_sub_dataset(idxs, dataset):
    """Build a shuffling DataLoader over the subset of `dataset` at `idxs`.

    Uses np.random.permutation instead of np.random.shuffle so the
    caller's index array is not mutated in place (the original shuffled
    the argument as a side effect; the shuffle itself is cosmetic since
    the DataLoader already has shuffle=True).

    Args:
        idxs: array-like of sample indices into `dataset`.
        dataset: the full dataset to take a subset of.

    Returns:
        DataLoader over DatasetSplit(dataset, shuffled_idxs),
        batch_size=64, 4 workers, shuffled each epoch.
    """
    shuffled_idxs = np.random.permutation(idxs)
    return DataLoader(DatasetSplit(dataset, shuffled_idxs),
                      batch_size=64, num_workers=4, shuffle=True)


def split_id(data_loader):
    """Return the indices of the 10% of samples with the smallest labels.

    Sorts all sample indices by their label and keeps the first tenth.
    For the CIFAR100 training set (500 samples per class, 100 classes)
    this selects the 10 lowest class labels: 500 * 10 = 5000 samples.

    Args:
        data_loader: DataLoader whose `.dataset` exposes `targets`
            (per-sample integer labels) and `__len__`.

    Returns:
        np.ndarray of the selected sample indices.
    """
    dataset = data_loader.dataset
    labels = np.array(dataset.targets)
    # arange(len)[argsort] == argsort itself; this replaces the original
    # vstack/column-sort round-trip with the equivalent direct form.
    sorted_idxs = np.arange(len(dataset))[labels.argsort()]
    subset_len = len(dataset) // 10
    return sorted_idxs[:subset_len]


def test(args, model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.cross_entropy(output, target, size_average=False).item()  # sum up batch loss
            pred = torch.max(output, 1)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\n Test_set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\n'
          .format(test_loss, correct, len(test_loader.dataset),
                  100. * correct / len(test_loader.dataset)))
    return 100. * correct / len(test_loader.dataset), test_loss


def main():
    """Pretrain a CNN on the 10 lowest-label classes of CIFAR100.

    Parses CLI options, builds train/test loaders restricted to a
    10-class subset, trains for `--epochs` epochs, and saves the model
    state dict under ../save/pretrain_models.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch CIFAR100 pretraining')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=2020, metavar='S',
                        help='random seed (default: 2020)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if use_cuda:
        # seed CUDA RNGs too, for reproducibility on GPU
        torch.cuda.manual_seed_all(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    # CIFAR10-style normalization constants; TODO(review): confirm these
    # are intended for CIFAR100 (they were inherited from the original).
    normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
                                     (0.2023, 0.1994, 0.2010))
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    # Honor the parsed batch-size options (the original hard-coded 64).
    # These loaders are only used to derive the subset indices; the
    # actual training loaders are built by split_sub_dataset below.
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR100('~/data', train=True, download=False,
                          transform=train_transform),
        batch_size=args.batch_size, shuffle=True, num_workers=4)
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR100('~/data', train=False, transform=test_transform),
        batch_size=args.test_batch_size, shuffle=True, num_workers=4)

    train_data = split_sub_dataset(split_id(train_loader), train_loader.dataset)
    test_data = split_sub_dataset(split_id(test_loader), test_loader.dataset)

    # NOTE(review): CNNCifar10 is the project model used for the
    # 10-class subset despite the CIFAR100 source dataset.
    model = models.CNNCifar10().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_data, optimizer, epoch)
        test(args, model, device, test_data)

    save_path = "../save/pretrain_models"
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    torch.save(model.state_dict(), '{}/cnn_cifar100-10_epoch200.pkl'.format(save_path))


if __name__ == '__main__':
    main()
