# Standard library
import argparse
import functools
import math
import os
import shutil
import sys
import tempfile
import time
from datetime import datetime

# Third-party
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from torchvision.models.resnet import resnet50
from loguru import logger
from tqdm import tqdm

import colossalai
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.nn.lr_scheduler import CosineAnnealingLR
from colossalai.nn.metric import Accuracy
from colossalai.trainer import Trainer, hooks
from colossalai.utils import get_dataloader


# Append run output to a per-day log file (one file per calendar day);
# enqueue=True routes records through a queue so multi-process logging is safe.
logger.add(f"../../log/{str(datetime.today())[:10]}_ResNet_train.log", level='INFO', encoding="utf-8", enqueue=True)


def time_it(func):
    """Decorator that prints the wall-clock runtime of *func* on every call.

    Args:
        func: the callable to time.

    Returns:
        A wrapper that forwards all arguments to *func*, prints the elapsed
        time, and returns *func*'s result unchanged.
    """
    # functools.wraps preserves func.__name__/__doc__ on the wrapper so the
    # printed name (and any introspection) stays correct after decoration.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print(f"Function {func.__name__} took {(end_time - start_time) :.3f} seconds to run.")
        return result
    return wrapper


# @time_it
def main(args):
    """Distributed ResNet-50 training entry point driven by ColossalAI.

    Builds the flower-classification datasets and dataloaders, wraps model,
    optimizer and criterion into a ColossalAI engine, then runs the
    train/evaluate loop for ``args.epochs`` epochs, logging metrics to
    TensorBoard and checkpointing the best-accuracy model.

    Args:
        args: parsed command-line namespace from ``parse_option()``.

    Raises:
        EnvironmentError: if no CUDA device is available.
    """
    if torch.cuda.is_available() is False:
        raise EnvironmentError("not find GPU device for training.")

    device = torch.device(args.device)

    # Initialise the distributed context; expects the process to have been
    # spawned by torch's distributed launcher (rank/world-size read from env).
    colossalai.launch_from_torch(
        config='./config.py',
    )
    print('colossalai----------')
    # NOTE: shadows the module-level loguru ``logger`` inside this function.
    logger = get_dist_logger()


    print(args)
    print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
    tb_writer = SummaryWriter()

    # Data loading code
    if args.dummy:
        # Synthetic ImageNet-sized tensors for smoke-testing the pipeline.
        print("=> Dummy data is used!")
        train_dataset = datasets.FakeData(1281167, (3, 224, 224), 1000, transforms.ToTensor())
        val_dataset = datasets.FakeData(50000, (3, 224, 224), 1000, transforms.ToTensor())
    else:
        traindir = os.path.join(args.data_path, 'train')
        valdir = os.path.join(args.data_path, 'test')
        # Standard ImageNet channel statistics.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

        train_dataset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))

        val_dataset = datasets.ImageFolder(
            valdir,
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ]))

    # ColossalAI's helper; with shuffle=True it attaches a distributed sampler
    # so each rank sees a distinct shard of the training set.
    train_dataloader = get_dataloader(dataset=train_dataset,
                                      shuffle=True,
                                      batch_size=args.batch_size,
                                      num_workers=args.workers,
                                      pin_memory=True,
                                      )

    test_dataloader = get_dataloader(dataset=val_dataset,
                                     add_sampler=False,
                                     batch_size=args.batch_size,
                                     num_workers=args.workers,
                                     pin_memory=True,
                                     )

    # save:  daisy,dandelion,roses,sunflowers,tulips
    # NOTE(review): when --dummy is set, FakeData may not expose ``.classes``
    # like ImageFolder does — confirm before running with dummy data.
    classes_list = train_dataset.classes
    with open('../../datasets/flowers/classes.txt', 'w') as text_cls:
        classes_str = ','.join(classes_list)
        text_cls.write(classes_str)

#     train_num = len(train_loader)  # total training samples / batch_size
#     val_num = len(val_loader)
#     logger.info(f"using {train_num} batchs for training, training: {len(train_dataset)} ; \
# {val_num} batchs for validation, validation:{len(val_dataset)}  .")
    model = resnet50(num_classes=5)
    # model.to(device)
    optimizer = optim.Adam(model.parameters(), args.lr)
    criterion = nn.CrossEntropyLoss().to(device)

    print('initialize start-------')
    # Wrap model/optimizer/criterion and the dataloaders into a ColossalAI
    # engine; device placement and distributed hooks come from ./config.py.
    engine, train_dataloader, test_dataloader, _ = colossalai.initialize(model,
                                                                         optimizer,
                                                                         criterion,
                                                                         train_dataloader,
                                                                         test_dataloader,
                                                                         )
    print('initialize end-------')
    best_acc1 = 0
    for epoch in range(args.epochs):  # loop over the dataset multiple times
        # train for one epoch
        mean_loss = train(train_dataloader, engine, criterion, optimizer, device, epoch)

        # evaluate on validation set
        sum_num = evaluate(test_dataloader, engine, device)
        acc1 = sum_num / len(val_dataset)

        logger.info("[epoch {}] accuracy: {}".format(epoch, round(acc1, 3)))
        tags = ["loss", "accuracy", "learning_rate"]
        tb_writer.add_scalar(tags[0], mean_loss, epoch)
        tb_writer.add_scalar(tags[1], acc1, epoch)
        tb_writer.add_scalar(tags[2], optimizer.param_groups[0]["lr"], epoch)

        # Checkpoint every epoch; save_checkpoint also copies to the "best"
        # path when this epoch improved top-1 accuracy.
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        save_checkpoint({'epoch': epoch + 1,
                         'state_dict': model.state_dict(),  # saved single-GPU style; no ``module.`` prefix needed
                         'best_acc1': best_acc1,
                         #    'optimizer' : optimizer.state_dict(),
                         #     'scheduler' : scheduler.state_dict()
                         },
                        is_best, args.save_path)

def train(train_loader, engine, criterion, optimizer, device, epoch):
    """Run one training epoch through the ColossalAI engine.

    Args:
        train_loader: iterable yielding (images, labels) batches.
        engine: ColossalAI engine wrapping the model/optimizer/criterion
            (provides ``train``/``zero_grad``/``backward``/``step``).
        criterion: loss function applied to (logits, labels).
        optimizer: underlying optimizer; only ``zero_grad`` is called here,
            parameter updates go through ``engine.step()``.
        device: torch device batches are moved to.
        epoch: current epoch index (used for display only).

    Returns:
        float: running mean of the batch losses over the epoch.

    Side effects:
        Exits the process via ``sys.exit(1)`` if a non-finite loss appears.
    """
    engine.train()
    mean_loss = torch.zeros(1).to(device)
    optimizer.zero_grad()
    with tqdm(train_loader, unit="batch") as tepoch:
        tepoch.set_description(f"Epoch {epoch}")
        for step, data in enumerate(tepoch):
            images, labels = data

            engine.zero_grad()
            pred = engine(images.to(device))

            loss = criterion(pred, labels.to(device))
            engine.backward(loss)
            # Incremental running mean of the losses seen so far this epoch.
            mean_loss = (mean_loss * step + loss.detach()) / (step + 1)
            # BUG FIX: the original assigned ``train_loader.desc``, which a
            # DataLoader silently ignores — the mean-loss text never showed.
            # Display it on the tqdm progress bar instead.
            tepoch.set_description("[epoch {}] mean loss {}".format(epoch, round(mean_loss.item(), 3)))

            if not torch.isfinite(loss):
                print('WARNING: non-finite loss, ending training ', loss)
                sys.exit(1)

            engine.step()
            tepoch.set_postfix(loss=loss.item())
        # Wait for all in-flight CUDA kernels so the returned loss (and any
        # caller-side timing) reflects completed work.
        if device != torch.device("cpu"):
            torch.cuda.synchronize(device)

        return mean_loss.item()


def evaluate(val_loader, engine, device):
    """Count correct top-1 predictions of *engine* over *val_loader*.

    Args:
        val_loader: iterable yielding (images, labels) batches.
        engine: callable model wrapper exposing ``eval()``.
        device: torch device inputs are moved to.

    Returns:
        float: total number of correctly classified samples.
    """
    engine.eval()
    # Accumulator for the number of correct predictions.
    correct = torch.zeros(1).to(device)
    for batch in val_loader:
        images, labels = batch
        with torch.no_grad():
            logits = engine(images.to(device))
        predicted = logits.argmax(dim=1)
        correct += (predicted == labels.to(device)).sum()
    return correct.item()


def save_checkpoint(state, is_best, filename='checkpoint.pth', best_filename='../../ResNet_best_engine.pth'):
    """Persist a training checkpoint, duplicating it when it is the best so far.

    Args:
        state: dict of values to serialize (epoch, state_dict, best_acc1, ...).
        is_best: if True, copy the checkpoint to *best_filename* as well.
        filename: path of the regular checkpoint file.
        best_filename: destination for the best-model copy. Generalized from a
            hard-coded path; the default preserves the original behavior.
    """
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, best_filename)


def _str2bool(value):
    """argparse converter for boolean flags ('true'/'false', case-insensitive).

    Needed because ``type=bool`` is broken in argparse: ``bool("False")`` is
    True, so ``--dummy False`` used to silently enable dummy data.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError(f"boolean value expected, got {value!r}")


def parse_option():
    """Parse the ResNet training command-line options.

    Returns:
        argparse.Namespace with all training hyperparameters and paths.
    """
    parser = argparse.ArgumentParser('ResNet')
    # BUG FIX: these three options previously used type=bool, which treats any
    # non-empty string (including "False") as True; _str2bool parses properly.
    parser.add_argument('--dummy', type=_str2bool, help='Whether data needs to be generated', default=False)
    parser.add_argument('--epochs', type=int, help='the number of epoch', default=3)
    parser.add_argument('--gpu_id', type=str, default='5,6')   # add for multi
    parser.add_argument('--batch_size', type=int, help='batch_size', default=32)
    parser.add_argument("--workers", type=int, help="the number of workers", default=8)
    parser.add_argument("--data_path", type=str, help="the path of model saved",
                        default='../../datasets/flowers')
    parser.add_argument("--save_path", type=str, help="the path of model saved",
                        default='./ResNet.pth')
    parser.add_argument('--lr', default=0.0002, type=float, metavar='LR',
                        help='initial learning rate', dest='lr')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--print_freq', default=30, type=int, metavar='W', help='Print frequency')
    # Whether to enable SyncBatchNorm
    parser.add_argument('--syncBN', type=_str2bool, default=False)
    parser.add_argument('--weights', type=str, default='',
                        help='initial weights path')
    parser.add_argument('--freeze_layers', type=_str2bool, default=False)
    # Do not change this argument; it is assigned automatically by the launcher
    parser.add_argument('--device', default='cuda', help='device id (i.e. 0 or 0,1 or cpu)')
    # Number of processes (not threads); set automatically from nproc_per_node
    parser.add_argument('--world_size', default=2, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    args = parser.parse_args()  # alternatively: args, _ = parser.parse_known_args()
    return args


if __name__ == '__main__':
    # Parse CLI options, run training, and report total wall-clock time.
    cli_args = parse_option()
    t_start = time.time()
    main(cli_args)
    t_end = time.time()
    print(f"Function   took {(t_end - t_start) :.3f} seconds to run.")
