import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import transforms, utils
from densenet import DenseNet
from densenet128 import DenseNet128
from densenet224 import DenseNet224
from densenet256 import DenseNet256
from densenet1024 import DenseNet1024
import argparse
from tqdm import tqdm
import datetime

# Multi-GPU training
# How to save / load the model:
#   save with  torch.save(model.module.state_dict(), args.ckpt)
#   load with  model.load_state_dict(torch.load(args.ckpt), False)
# where `model` has been wrapped with model = nn.DataParallel(model)

# Select CUDA when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Timestamp used to tag checkpoint filenames in train_model().
# NOTE(review): the name `time` would shadow the stdlib `time` module if it
# were ever imported here — consider renaming (requires updating train_model).
time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')


def str2bool(v):
    """Parse a yes/no-style command-line string into a bool.

    Accepts common spellings ('yes'/'no', 'true'/'false', 't'/'f',
    'y'/'n', '1'/'0', case-insensitive); anything else raises
    argparse.ArgumentTypeError so argparse reports a clean usage error.
    """
    text = v.lower()
    if text in ('yes', 'true', 't', 'y', '1'):
        return True
    if text in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Unsupported value encountered.')


def train_model(model, criterion, optimizer, dataloaders, args, start_num_epochs=0):
    """Train `model` for args.epoch epochs, checkpointing the best model.

    After each epoch the validation accuracy is computed; when both the
    training accuracy and the validation accuracy reach a new best, a full
    checkpoint (model, optimizer, their state dicts and the epoch) is saved.

    Args:
        model: network to train (moved to `device` by the caller).
        criterion: loss function, e.g. nn.CrossEntropyLoss.
        optimizer: optimizer over model.parameters().
        dataloaders: training DataLoader yielding (images, labels).
        args: parsed CLI namespace (uses epoch, size, split_num, cont, ckpt).
        start_num_epochs: first epoch index, for resuming (default 0).

    Returns:
        The trained model.
    """
    num_epochs = args.epoch
    max_acc = 0
    max_validation_acc = 0
    for epoch in range(start_num_epochs, num_epochs):
        # Re-enable train mode every epoch: validation() below switches the
        # model to eval mode, which would otherwise freeze dropout/batch-norm
        # for all epochs after the first.
        model.train()
        print('Epoch {}/{}'.format(epoch + 1, num_epochs))
        print('-' * 10)
        epoch_loss = 0
        epoch_acc = 0

        step = 0
        for data0, data1 in tqdm(dataloaders):
            step += 1
            inputs = data0.to(device)
            labels = data1.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_acc += accuracy(outputs, labels, topk=(1,))[0]

        # Accuracy on the held-out validation set (random-crop voting).
        validation_acc = validation(model, args.size, args.split_num)

        if max_acc < epoch_acc / step:
            max_acc = epoch_acc / step
            if max_validation_acc < validation_acc:
                max_validation_acc = validation_acc
                # Full-object checkpoint; load back with torch.load(args.ckpt).
                state = {
                    'epoch': epoch,
                    'model': model,
                    'optimizer': optimizer,
                    'model_dict': model.state_dict(),
                    'optimizer_dict': optimizer.state_dict()
                }
                # Save only inside this branch: previously the save ran even
                # when validation had not improved, so `state` could be
                # unassigned (NameError) or a stale checkpoint was rewritten.
                if args.cont:
                    torch.save(state, "{}".format(args.ckpt))
                else:
                    torch.save(state, "state_{}_{}_{}".format(time, str(args.size), args.ckpt))
        # Report 1-based epoch numbers, consistent with the header above.
        print("epoch %d loss:%0.3f, acc:%0.3f, validation_acc:%0.3f" % (
            epoch + 1, epoch_loss / step, epoch_acc / step, validation_acc))
    return model


def accuracy(outputs, labels, topk=(1,)):
    """Compute the precision@k for the specified values of k.

    Args:
        outputs: (batch, num_classes) logits/scores.
        labels: (batch,) integer class labels.
        topk: tuple of k values, e.g. (1, 5).

    Returns:
        List of 0-dim tensors, one per k, each the percentage (0-100) of
        samples whose label appears in the top-k predictions.
    """
    maxk = max(topk)
    batch_size = labels.size(0)
    # pred holds the indices of the maxk highest scores per sample,
    # e.g. scores [0.2, 0.1, 0.8, 1.2] -> top-1 index 3.
    _, pred = outputs.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(labels.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape (not view): `correct` stems from a transposed tensor, so a
        # row slice may be non-contiguous and .view(-1) raises a RuntimeError.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


def test_predict(outputs, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    # 设置取top1 最大值
    # 例如 [0.2, 0.1, 0.8, 1.2]
    maxk = max(topk)
    # _最大值 pred索引
    # _ = 1.2 pred = 3 
    _, pred = outputs.topk(maxk, 1, True, True)
    result_class = [0, 0, 0, 0]
    for p in pred:
        result_class[p] += 1
    # print(result_class)
    predict_result = result_class.index(max(result_class))
    return predict_result


# Validation: classify each image by majority vote over random crops
def validation(model, size, num, data_dir="Photos/Validation/"):
    """Compute validation accuracy via random-crop voting.

    Each validation image is augmented `num` times (random flips, small
    rotation, random size x size crop); the resulting batch of crops is
    classified in one forward pass and test_predict() takes the majority
    vote as the image's prediction.

    Args:
        model: network to evaluate; its original train/eval mode is
            restored before returning.
        size: side length of the random crops fed to the model.
        num: number of random crops voted per image.
        data_dir: ImageFolder root of the validation set (default
            "Photos/Validation/", the previously hard-coded path).

    Returns:
        Fraction (0-1) of validation images predicted correctly.
    """
    validation_img_data = torchvision.datasets.ImageFolder(data_dir,
                                                           transform=transforms.Compose([
                                                               transforms.ToTensor()
                                                           ]))
    validation_dataloaders = torch.utils.data.DataLoader(validation_img_data, batch_size=1, shuffle=True, num_workers=2)

    # Remember the caller's mode: previously eval mode leaked back into the
    # training loop, silently disabling dropout/batch-norm updates there.
    was_training = model.training
    model.eval()
    correct_count = 0
    # Random augmentation used to generate the voting crops (PIL-based)
    image_aug = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomRotation([0, 10]),
        transforms.RandomCrop(size)
    ])
    # PIL image -> tensor
    loader = transforms.Compose([
        transforms.ToTensor()])
    # tensor -> PIL image (the augmentations above operate on PIL images)
    unloader = transforms.ToPILImage()

    with torch.no_grad():
        for data0, data1 in tqdm(validation_dataloaders):
            inputs = data0
            labels = data1.to(device)
            image = unloader(inputs[0].squeeze(0))
            # Stack `num` independently augmented crops along the batch dim.
            for i in range(num):
                if i == 0:
                    merge = loader(image_aug(image)).unsqueeze(0).to(device, torch.float)
                else:
                    merge = torch.cat((merge, loader(image_aug(image)).unsqueeze(0).to(device, torch.float)), 0)
            outputs = model(merge)
            if test_predict(outputs, topk=(1,)) == labels[0]:
                correct_count += 1
    # Restore the mode the model arrived in (usually training mode).
    model.train(was_training)
    return correct_count / len(validation_dataloaders)


# Sliding-window split of each test image with majority-vote classification
def test_model(model, dataloaders, args):
    """Evaluate by cutting each image into size x size windows (step
    args.stride), classifying all windows in one forward pass, and taking
    the majority-vote class via test_predict().

    NOTE(review): only labels[0] is compared and the windows of the whole
    batch are merged into a single vote, so the result is only meaningful
    with batch_size == 1 — confirm callers use it that way.
    """
    model.eval()
    correct_count = 0
    with torch.no_grad():
        for data0, data1 in tqdm(dataloaders):
            inputs = data0.to(device)
            labels = data1.to(device)
            # inputs is (N, C, H, W)
            heigh = inputs.size()[2]
            width = inputs.size()[3]
            stride, size = args.stride, args.size
            # Window-start bound: when stride < size this is `dim - stride`,
            # when stride == size it collapses to `dim`. Presumably tuned so
            # the last window fits for the supported 128/256/512 combinations
            # — verify for other stride/size pairs.
            for w in range(0, width - stride * (1 - stride // size), stride):
                for h in range(0, heigh - stride * (1 - stride // size), stride):
                    if h == 0 and w == 0:
                        merge = inputs[:, :, h:h + size, w:w + size]
                    else:
                        # Stack every window along the batch dimension.
                        merge = torch.cat((merge, inputs[:, :, h:h + size, w:w + size]), 0)
            outputs = model(merge)
            if test_predict(outputs, topk=(1,)) == labels[0]:
                correct_count += 1
    # NOTE(review): this multiplies by batch_size rather than dividing by
    # the sample count; the printed value is a true accuracy only when
    # batch_size == 1 — verify the intended formula.
    print("acc:%0.2f" % (correct_count / len(dataloaders) * args.batch_size))


# Random-crop split of each test image with majority-vote classification
def test_model_random(model, dataloaders, args):
    """Evaluate by sampling args.split_num random augmented crops per image,
    classifying them in one forward pass, and taking the majority-vote class
    via test_predict(). Only labels[0] of each batch is compared, so this is
    intended for batch_size == 1 loaders.
    """
    model.eval()
    correct_count = 0
    # Random augmentation pipeline (operates on PIL images)
    image_aug = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomRotation([0, 10]),
        transforms.RandomCrop(args.size)
    ])
    # PIL image -> tensor
    to_tensor = transforms.Compose([
        transforms.ToTensor()])
    # tensor -> PIL image
    to_pil = transforms.ToPILImage()

    with torch.no_grad():
        for batch_images, batch_labels in tqdm(dataloaders):
            labels = batch_labels.to(device)
            pil_image = to_pil(batch_images[0].squeeze(0))
            # Build args.split_num augmented crops, stacked on the batch dim.
            crops = [to_tensor(image_aug(pil_image)).unsqueeze(0).to(device, torch.float)
                     for _ in range(args.split_num)]
            merge = torch.cat(crops, 0)
            outputs = model(merge)
            if test_predict(outputs, topk=(1,)) == labels[0]:
                correct_count += 1
    print("random acc:%0.2f" % (correct_count / len(dataloaders) * args.batch_size))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Densenet for ICIAR2018')
    parser.add_argument("action", type=str, help="输入train or test")
    parser.add_argument('--dataset', '-d', type=str, required=True, help='数据集路径')
    parser.add_argument('--depth', '-t', type=int, default=20, help='DenseNet深度，默认20')
    parser.add_argument('--growth_rate', '-g', type=int, default=12, help='DenseNet增长率，默认12')
    parser.add_argument('--drop_rate', '-r', type=float, default=0.5, help='dropout rate, 默认0.5')
    parser.add_argument('--num_classes', '-n', type=int, default=4, help='几分类，默认4')
    parser.add_argument('--epoch', '-e', type=int, default=100, help='训练轮数，默认100')
    parser.add_argument('--batch_size', '-b', type=int, default=30, help='数据批数，默认35，即一轮训练35条数据')
    parser.add_argument('--ckpt', '-c', type=str, default="model.pth", help='训练模型保存文件,以及测试集需要模型，默认model.pth')
    parser.add_argument('--stride', type=int, default=256, help='测试集滑窗分块步长，可选128, 256, 512')
    parser.add_argument('--size', type=int, default=512, help='可选128, 256, 512, 1024')
    parser.add_argument('--split_num', type=int, default=35, help='测试集随机分块为多少块')
    parser.add_argument('--split_random', type=str2bool, default='True', help='是否随机分块')
    parser.add_argument('--cont', type=str2bool, default='False', help='中断后继续跑')

    args = parser.parse_args()

    # Dataset used for training (action=train) or testing (action=test).
    img_data = torchvision.datasets.ImageFolder(args.dataset,
                                                transform=transforms.Compose([
                                                    transforms.ToTensor()
                                                ]))
    dataloaders = torch.utils.data.DataLoader(img_data, batch_size=args.batch_size, shuffle=True, num_workers=2)

    if args.action == "train":
        # Resume from a full checkpoint written by train_model()
        if args.cont:
            checkpoint = torch.load(args.ckpt)
            model = checkpoint['model']
            optimizer = checkpoint['optimizer']
            start_num_epochs = checkpoint['epoch'] + 1
        else:
            # Pick the DenseNet variant matching the input patch size
            if args.size == 128:
                model = DenseNet128(args.depth, args.num_classes, args.growth_rate, reduction=0.5,
                                    bottleneck=True, dropRate=args.drop_rate)
            elif args.size == 256:
                model = DenseNet256(args.depth, args.num_classes, args.growth_rate, reduction=0.5,
                                    bottleneck=True, dropRate=args.drop_rate)
            elif args.size == 512:
                model = DenseNet(args.depth, args.num_classes, args.growth_rate, reduction=0.5,
                                 bottleneck=True, dropRate=args.drop_rate)
            elif args.size == 1024:
                model = DenseNet1024(args.depth, args.num_classes, args.growth_rate, reduction=0.5,
                                     bottleneck=True, dropRate=args.drop_rate)
            elif args.size == 224:
                depth = 40 # (depth - 4) % 3 == 0
                if (depth - 4) % 3:
                    raise Exception('Invalid depth')
                block_config = [(depth - 4) // 6 for _ in range(3)]
                model = DenseNet224(block_config=block_config, num_classes=args.num_classes, efficient=True)
            else:
                # Fail fast: previously an unsupported size left `model`
                # unbound and crashed later with a confusing NameError.
                raise ValueError('Unsupported --size %d (choose 128, 224, 256, 512 or 1024)' % args.size)
            if torch.cuda.device_count() > 1:
                model = nn.DataParallel(model)
            model.to(device)
            optimizer = optim.Adagrad(model.parameters())
            start_num_epochs = 0

        criterion = nn.CrossEntropyLoss().to(device)
        train_model(model, criterion, optimizer, dataloaders, args, start_num_epochs)
    elif args.action == "test":
        # Load the full-object checkpoint and evaluate with the chosen scheme
        checkpoint = torch.load(args.ckpt)
        model = checkpoint['model']
        if args.split_random:
            test_model_random(model, dataloaders, args)
        else:
            test_model(model, dataloaders, args)
