import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import transforms, utils
from densenet import DenseNet
from densenet128 import DenseNet128
from densenet256 import DenseNet256
from densenet1024 import DenseNet1024
import torch.nn.functional as F
import argparse
from tqdm import tqdm
import datetime
# Multi-GPU training script.
# Saving / loading the model:
#   save with  torch.save(model.module.state_dict(), args.ckpt)
#   load with  model.load_state_dict(torch.load(args.ckpt), False)
# where model has been wrapped with model = nn.DataParallel(model)

# Use CUDA when available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')

def train_model(model, criterion, optimizer, dataloaders, args):
    """Train `model` on `dataloaders`, saving the best checkpoint.

    Runs epochs `args.epoch_at` .. `args.epoch - 1` (resume-friendly), and
    whenever the mean top-1 accuracy of an epoch improves, saves the FULL
    model (load back with torch.load) to "<time>_<size>_<ckpt>".

    Returns the trained model.
    """
    model.train()
    num_epochs = args.epoch
    start_epoch = args.epoch_at
    best_acc = 0.0
    for epoch in range(start_epoch, num_epochs):
        print('Epoch {}/{}'.format(epoch + 1, num_epochs))
        print('-' * 10)
        epoch_loss = 0.0
        epoch_acc = 0.0
        step = 0
        for data0, data1 in tqdm(dataloaders):
            step += 1
            inputs = data0.to(device)
            labels = data1.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            # .item() keeps the accumulator a plain float instead of a 0-dim tensor
            epoch_acc += accuracy(outputs, labels, topk=(1,))[0].item()

        if step == 0:
            # Empty dataloader: nothing to average, skip instead of dividing by zero.
            print("epoch %d: dataloader is empty, skipping" % (epoch + 1))
            continue
        mean_acc = epoch_acc / step
        if best_acc < mean_acc:
            best_acc = mean_acc
            # Save the whole model (+graph); load it back with torch.load(args.ckpt)
            torch.save(model, "{}_{}_{}".format(time, str(args.size), args.ckpt))
        # BUG FIX: report the 1-based epoch number, consistent with the header
        # print above (the original printed the 0-based index here).
        print("epoch %d loss:%0.3f, acc:%0.3f" % (epoch + 1, epoch_loss / step, mean_acc))
    return model

def accuracy(outputs, labels, topk=(1,)):
    """Compute the precision@k of `outputs` for each k in `topk`.

    Args:
        outputs: (batch, num_classes) score tensor.
        labels: (batch,) ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of 0-dim tensors, one per k, each the percentage (0-100) of
        samples whose true label is among the top-k predictions.
    """
    maxk = max(topk)
    batch_size = labels.size(0)
    # pred: indices of the maxk highest scores per sample, then transposed to
    # shape (maxk, batch) so row i holds the rank-i prediction for every sample.
    _, pred = outputs.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(labels.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # BUG FIX: correct[:k] is a non-contiguous slice of a transposed
        # tensor, so .view(-1) raises on recent PyTorch for k > 1;
        # .reshape(-1) copies when needed and always works.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res

def test_predict(outputs, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    # 设置取top1 最大值
    # 例如 [0.2, 0.1, 0.8, 1.2]
    maxk = max(topk)
    # _最大值 pred索引
    # _ = 1.2 pred = 3 
    _, pred = outputs.topk(maxk, 1, True, True)
    result_class = [0, 0, 0, 0]
    for p in pred:
        result_class[p]+=1
    #print(result_class)
    predict_result = result_class.index(max(result_class))
    return predict_result

def test_model(model, dataloaders, args):
    """Evaluate a fully-saved checkpoint by patch-voting over tiled images.

    Replaces the passed-in `model` with the full model loaded from
    args.ckpt, tiles every input image into (args.size x args.size) patches
    with step args.stride, classifies all patches of a batch at once, and
    counts the batch as correct when the majority-vote class equals the
    first label of the batch.
    """
    # When trained on multiple GPUs and tested on a single GPU, save with
    # torch.save(model.module.state_dict(), args.ckpt) and load with
    # model.load_state_dict(torch.load(args.ckpt), False)
    # model.load_state_dict(torch.load(args.ckpt))
    # When the whole model (+graph) was saved via torch.save(model, args.ckpt),
    # use the load below.
    model = torch.load(args.ckpt) # load a fully-saved model (not a state_dict)
    model.eval()
    correct_count = 0
    with torch.no_grad():
        for data0, data1 in tqdm(dataloaders):
            inputs = data0.to(device)
            labels = data1.to(device)
            # inputs assumed to be (batch, channels, H, W) — standard for
            # ImageFolder + ToTensor
            heigh = inputs.size()[2]
            width = inputs.size()[3]
            stride, size = args.stride, args.size
            # Slide a size x size window over the image; all patches are
            # concatenated along dim 0 into `merge`.
            # NOTE(review): when stride == size, stride//size == 1 and the
            # upper bound reduces to the full width/height; when stride < size
            # it becomes width - stride, which can yield windows narrower than
            # `size` at the right/bottom edge — confirm intended tiling.
            for w in range(0, width-stride*(1-stride//size), stride):
                for h in range(0, heigh-stride*(1-stride//size), stride):
                    if h == 0 and w == 0:
                        merge = inputs[:,:,h:h+size,w:w+size]
                    else:
                        merge = torch.cat((merge, inputs[:,:,h:h+size,w:w+size]), 0)
            outputs = model(merge)
            # labels[0] assumes every image in the batch shares one label —
            # TODO confirm; this clearly holds only for batch_size == 1.
            if test_predict(outputs, topk=(1,)) == labels[0]:
                correct_count +=1
    # NOTE(review): operator precedence makes this
    # (correct_count / num_batches) * batch_size, i.e. accuracy is multiplied
    # by the batch size — likely intended correct_count / len(dataloaders);
    # verify before trusting the printed number.
    print("acc:%0.2f" % (correct_count/len(dataloaders)*args.batch_size))


if __name__=="__main__":
    # Command-line interface: `train` fits a DenseNet variant chosen by
    # --size, `test` evaluates a saved checkpoint via patch voting.
    parser = argparse.ArgumentParser(description='Densenet for ICIAR2018')
    parser.add_argument("action", type=str, help="输入train or test")
    parser.add_argument('--dataset', '-d', type=str, required=True, help='数据集路径')
    parser.add_argument('--depth', '-t', type=int, default=20, help='DenseNet深度，默认20')
    parser.add_argument('--growth_rate', '-g', type=int, default=12, help='DenseNet增长率，默认12')
    parser.add_argument('--drop_rate', '-r', type=float, default=0.5, help='dropout rate, 默认0.5')
    parser.add_argument('--num_classes', '-n', type=int, default=4, help='几分类，默认4')
    parser.add_argument('--epoch', '-e', type=int, default=100, help='训练轮数，默认100')
    parser.add_argument('--batch_size', '-b', type=int, default=30, help='数据批数，默认35，即一轮训练35条数据')
    parser.add_argument('--ckpt', '-c', type=str, default="model.pth", help='训练模型保存文件,以及测试集需要模型，默认model.pth')
    parser.add_argument('--stride', type=int, default=256, help='可选128, 256, 512')
    parser.add_argument('--size', type=int, default=512, help='可选128, 256, 512, 1024')
    parser.add_argument('--cont', type=int, default=None, help='中断后继续跑')
    parser.add_argument('--epoch_at', type=int, default=0, help='中断后继续跑第几轮')
    args = parser.parse_args()

    # ImageFolder dataset with ToTensor only — no augmentation/normalization.
    img_data = torchvision.datasets.ImageFolder(
        args.dataset,
        transform=transforms.Compose([transforms.ToTensor()]))
    dataloaders = torch.utils.data.DataLoader(
        img_data, batch_size=args.batch_size, shuffle=True, num_workers=2)

    if args.cont:
        # Resume from a fully-saved checkpoint.
        model = torch.load(args.ckpt)
    else:
        # Dispatch the architecture on the configured patch size.
        arch_by_size = {
            128: DenseNet128,
            256: DenseNet256,
            512: DenseNet,
            1024: DenseNet1024,
        }
        model = arch_by_size[args.size](
            args.depth, args.num_classes, args.growth_rate,
            reduction=0.5, bottleneck=True, dropRate=args.drop_rate)

    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    model.to(device)

    if args.action == "train":
        criterion = nn.CrossEntropyLoss().to(device)
        optimizer = optim.Adagrad(model.parameters())
        train_model(model, criterion, optimizer, dataloaders, args)
    elif args.action == "test":
        test_model(model, dataloaders, args)
