# Standard library
import argparse
import functools
import os
import shutil
import sys
import time
from datetime import datetime

# Third-party
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision.transforms as transforms
from torchvision import transforms, datasets
from torch.utils.tensorboard import SummaryWriter
from loguru import logger
from tqdm import tqdm
# from torchvision.models.mobilenet import mobilenet_v2

# Local
from network import MobileNetV2

# Append to a per-day log file (e.g. ../../log/2024-01-31_MobileNetV2_train.log);
# enqueue=True routes records through a queue so logging is process/thread-safe.
logger.add(f"../../log/{str(datetime.today())[:10]}_MobileNetV2_train.log", level='INFO', encoding="utf-8", enqueue=True)


def time_it(func):
    """Decorator that prints the wall-clock runtime of each call to ``func``.

    Uses ``time.perf_counter`` (monotonic, highest available resolution) and
    ``functools.wraps`` so the decorated function keeps its ``__name__`` and
    docstring.
    """
    @functools.wraps(func)  # preserve func.__name__ / __doc__ for callers and logs
    def wrapper(*args, **kwargs):
        start_time = time.perf_counter()
        result = func(*args, **kwargs)
        end_time = time.perf_counter()
        print(f"Function {func.__name__} took {(end_time - start_time) :.3f} seconds to run.")
        return result
    return wrapper


def train(train_loader, model, criterion, optimizer, device, epoch):
    """Train ``model`` for one epoch and return the mean loss over all batches.

    Args:
        train_loader: iterable yielding (images, labels) batches.
        model: the network being trained (switched to train mode here).
        criterion: loss function applied to (logits, labels).
        optimizer: optimizer stepped once per batch.
        device: torch.device the batches are moved to.
        epoch: current epoch index (used only for the progress-bar label).

    Returns:
        float: running mean of the per-batch losses for this epoch.

    Exits the process with status 1 if a non-finite loss is encountered.
    """
    model.train()
    mean_loss = torch.zeros(1).to(device)
    optimizer.zero_grad()

    with tqdm(train_loader, unit="batch") as tepoch:
        for step, data in enumerate(tepoch):
            tepoch.set_description(f"Epoch {epoch}")

            images, labels = data
            pred = model(images.to(device))
            loss = criterion(pred, labels.to(device))

            # Bail out BEFORE backward/step so a NaN/Inf loss can never
            # produce gradients or corrupt the weights.
            if not torch.isfinite(loss):
                logger.error(f'WARNING: non-finite loss {loss.item()}, ending training')
                sys.exit(1)

            loss.backward()
            # Incremental running mean: avoids storing every batch loss.
            mean_loss = (mean_loss * step + loss.detach()) / (step + 1)
            optimizer.step()
            optimizer.zero_grad()

            tepoch.set_postfix(loss=loss.item())
        # Wait for all queued CUDA kernels to finish so the reported loss
        # (and any outer timing) reflects completed work.
        if device != torch.device("cpu"):
            torch.cuda.synchronize(device)

        return mean_loss.item()


@torch.no_grad()
def evaluate(val_loader, model, device):
    """Return the number of correctly classified validation samples.

    Args:
        val_loader: iterable yielding (images, labels) batches.
        model: classifier producing per-class logits; set to eval mode here.
        device: torch.device on which the comparison is performed.

    Returns:
        float: total count of samples whose argmax prediction matches the label.
    """
    model.eval()
    # Running count of correct predictions, kept on the target device.
    correct = torch.zeros(1).to(device)
    for images, labels in val_loader:
        logits = model(images.to(device))
        predicted = logits.argmax(dim=1)
        correct += (predicted == labels.to(device)).sum()
    return correct.item()


def save_checkpoint(state, is_best, filename='checkpoint.pth',
                    best_filename='../../models/MobileNetV2_best.pth'):
    """Serialize ``state`` to ``filename``; duplicate it when it is the best so far.

    Args:
        state: picklable object (typically a dict with epoch, state_dict, metrics).
        is_best: when True, the checkpoint is also copied to ``best_filename``.
        filename: destination path for the per-epoch checkpoint.
        best_filename: destination for the best-model copy. New optional
            parameter; the default preserves the previously hard-coded path,
            so existing callers are unaffected.
    """
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, best_filename)


@time_it
def main(args):
    """Train MobileNetV2 (5 classes) on an ImageFolder dataset and checkpoint it.

    Side effects: writes the class list to ../../datasets/flowers/classes.txt,
    logs metrics to TensorBoard (./runs) and the loguru log file, and saves a
    checkpoint to args.save_path after every epoch (best model copied by
    save_checkpoint).
    """
    # When single-card training, use cuda:x to select a free card
    # NOTE(review): GPU index 7 is hard-coded — confirm it exists on this machine.
    args.device = torch.device("cuda:7" if torch.cuda.is_available() else "cpu")
    # NOTE(review): this silently overrides the --workers CLI flag; capped at 8.
    args.workers = min([os.cpu_count(), args.batch_size if args.batch_size > 1 else 0, 8])  # number of workers

    logger.info(args)
    print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
    tb_writer = SummaryWriter()

    # Data loading code
    if args.dummy:
        print("=> Dummy data is used!")
        # ImageNet-sized fake data (1000 classes) for pipeline smoke tests.
        # NOTE(review): the model below outputs only 5 classes — labels >= 5
        # from FakeData will break CrossEntropyLoss; verify before using --dummy.
        train_dataset = datasets.FakeData(1281167, (3, 224, 224), 1000, transforms.ToTensor())
        val_dataset = datasets.FakeData(50000, (3, 224, 224), 1000, transforms.ToTensor())
    else:
        traindir = os.path.join(args.data_path, 'train')
        # Validation images are read from the 'test' sub-directory.
        valdir = os.path.join(args.data_path, 'test')
        # Standard ImageNet channel statistics.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

        # Training pipeline: random crop + horizontal flip for augmentation.
        train_dataset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))

        # Validation pipeline: deterministic resize + center crop, no augmentation.
        val_dataset = datasets.ImageFolder(
            valdir,
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # save:  daisy,dandelion,roses,sunflowers,tulips
    # Persist the class order so inference code can map logits back to names.
    classes_list = train_dataset.classes
    with open('../../datasets/flowers/classes.txt', 'w') as text_cls:
        classes_str = ','.join(classes_list)
        text_cls.write(classes_str)

    train_num = len(train_loader)  # number of training batches per epoch
    val_num = len(val_loader)  # number of validation batches per epoch
    logger.info(f"using {train_num} batchs/epoch for training, {val_num} batchs/epoch  for validation.")

    model = MobileNetV2(num_classes=5)

    # # load pretrain weights
    # # download url: https://download.pytorch.org/models/mobilenet_v2-b0353104.pth
    # model_weight_path = "./mobilenet_v2-b0353104.pth"
    # assert os.path.exists(model_weight_path), "file {} dose not exist.".format(model_weight_path)
    # pre_weights = torch.load(model_weight_path, map_location='cpu')
    # # delete classifier weights
    # pre_dict = {k: v for k, v in pre_weights.items() if model.state_dict()[k].numel() == v.numel()}
    # missing_keys, unexpected_keys = model.load_state_dict(pre_dict, strict=False)
    # # freeze features weights
    # for param in model.features.parameters():
    #     param.requires_grad = False

    model.to(args.device)
    criterion = nn.CrossEntropyLoss().to(args.device)
    # NOTE(review): args.momentum and args.weight_decay are parsed but unused here.
    optimizer = optim.Adam(model.parameters(), args.lr)

    # params = [p for p in model.parameters() if p.requires_grad]
    # optimizer = optim.Adam(params, lr=args.lr)

    best_acc1 = 0  # best top-1 accuracy observed so far

    for epoch in range(args.epochs):  # loop over the dataset multiple times
        # train for one epoch
        mean_loss = train(train_loader, model, criterion, optimizer, args.device, epoch)

        # evaluate on validation set
        sum_num = evaluate(val_loader, model, args.device)
        acc1 = sum_num / len(val_dataset)  # correct predictions / total samples

        # scheduler.step()
        ##  remember best acc@1 and save checkpoint
        logger.info("[epoch {}] accuracy: {}".format(epoch, round(acc1, 3)))
        tags = ["loss", "accuracy", "learning_rate"]
        tb_writer.add_scalar(tags[0], mean_loss, epoch)
        tb_writer.add_scalar(tags[1], acc1, epoch)
        tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)

        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        save_checkpoint({'epoch': epoch + 1,
                         'state_dict': model.state_dict(),
                         'best_acc1': best_acc1,
                         #    'optimizer' : optimizer.state_dict(),
                         #     'scheduler' : scheduler.state_dict()
                         },
                        is_best, args.save_path)


def _str2bool(value):
    """Parse a command-line boolean so that '--flag False' really means False.

    argparse's ``type=bool`` applies ``bool(str)``, which is True for ANY
    non-empty string — including the literal "False". This converter accepts
    the usual textual spellings and raises on anything else.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError(f'Boolean value expected, got {value!r}')


def parse_option():
    """Build and parse the command-line options for MobileNetV2 training."""
    parser = argparse.ArgumentParser('MobileNetV2')
    # type=_str2bool (not bool): bool('False') is True, which used to silently
    # enable dummy data whenever the flag was given with any value.
    parser.add_argument('--dummy', type=_str2bool, help='Whether data needs to be generated', default=False)
    parser.add_argument('--epochs', type=int, help='the number of epoch', default=10)
    parser.add_argument('--batch_size', type=int, help='batch_size', default=32)
    parser.add_argument("--workers", type=int, help="the number of workers", default=3)
    parser.add_argument("--data_path", type=str, help="the path of the dataset",
                        default='../../datasets/flowers')
    parser.add_argument("--save_path", type=str, help="the path of model saved",
                        default='../../models/MobileNetV2.pth')
    parser.add_argument('--lr', default=0.0002, type=float, metavar='LR',
                        help='initial learning rate', dest='lr')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--print_freq', default=30, type=int, metavar='W', help='Print frequency')
    parser.add_argument('--freeze_layers', type=_str2bool, default=False)
    args = parser.parse_args()  # alternatively: args, _ = parser.parse_known_args()
    return args


if __name__ == '__main__':
    # Script entry point: parse CLI options, then run the training loop.
    main(parse_option())