
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import sys
import os

sys.path.append(os.getcwd() + "/models")

sys.path.append("utils")
from class_model import VGG19
import vgg
from custom_model import CNN5NET
from five import CNNFIVE
from ghostnetv2_torch import ghostnetv2
from  ShuffleNetv2_torch import ShuffleNetV2
import time
import os
from math import cos, pi
import numpy as np
import cv2
from torchsampler import ImbalancedDatasetSampler

from FocalLoss import FocalLoss
from sklearn.metrics import confusion_matrix
from torch.utils.tensorboard import SummaryWriter #tensorboard 可视化
# %matplotlib inline
# **************************************************************
import argparse

def make_parser():
    """Build the command-line argument parser for the training configuration.

    Returns an ``argparse.ArgumentParser`` (``add_help`` disabled so it can be
    composed as a parent parser) covering model choice, data paths,
    optimization hyper-parameters and checkpoint/resume options.
    """
    parser = argparse.ArgumentParser(description="training config", add_help=False)

    # Model / data.
    parser.add_argument("--model", type=str, default='ghostnet2')  # ghostnet2 or
    parser.add_argument("--image_root", type=str, default='')
    parser.add_argument("--num_classes", type=int, default=2)

    # Optimization schedule.
    parser.add_argument("--max_epoch", type=int, default=200)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--lr_min", type=float, default=0.0001)
    parser.add_argument("--lr_max", type=float, default=0.01)
    parser.add_argument("--base_lr", type=float, default=0.01)
    parser.add_argument("--input_size", type=int, nargs='+', default=[192, 192])

    # Checkpointing / resuming.
    parser.add_argument("--load_train", type=str, default='')
    parser.add_argument("--fade", type=int, default=0)
    parser.add_argument("--resume_train", type=str, default='')

    # Normalization statistics and loader settings.
    parser.add_argument("--mean", type=float, nargs='+', default=[0.4278617, 0.4715784, 0.47092885])
    parser.add_argument("--std", type=float, nargs='+', default=[0.2286385, 0.22028573, 0.2346289])
    parser.add_argument("--num_workers", type=int, default=4)

    parser.add_argument("--output_name", type=str, default='')
    return parser


# **************************************************************
'''{'green': 0, 'left_green': 1, 'left_red': 2, 'left_yollow': 3, 'off': 4, 'other': 5, 'red': 6, 'right_green': 7, 'right_red': 8, 'right_yellow': 9, 'straight_green': 10, 'straight_red': 11, 'straight_yellow': 12, 'yellow': 13}
39
{'green': 0, '19eft_green': 1, 'left_red': 2, 'left_yollow': 3, 'off': 4, 'other': 5, 'red': 6, 'right_green': 7, 'right_red': 8, 'right_yellow': 9, 'straight_green': 10, 'straight_red': 11, 'straight_yellow': 12, 'yellow': 13}'''


def adjust_learning_rate(optimizer, current_epoch, max_epoch, lr_min=0., lr_max=0.01, warmup=True):
    """Set the learning rate for ``optimizer`` following warmup + cosine decay.

    For the first 5 epochs (when ``warmup`` is True) the lr ramps linearly from
    0 up to ``lr_max``; afterwards it follows a cosine annealing curve from
    ``lr_max`` down to ``lr_min`` over the remaining epochs.

    Args:
        optimizer: torch optimizer whose ``param_groups`` lr is updated in place.
        current_epoch: 0-based epoch index.
        max_epoch: total number of training epochs.
        lr_min: floor of the cosine schedule.
        lr_max: peak learning rate (reached right after warmup).
        warmup: whether to apply the 5-epoch linear warmup.
    """
    warmup_epoch = 5 if warmup else 0
    # BUGFIX: the original overwrote the ``lr_min`` argument with
    # ``optimizer lr * 0.5`` each call, ignoring the caller's floor and making
    # the schedule's minimum drift with the current lr.  Honor the parameter.
    if current_epoch < warmup_epoch:
        lr = lr_max * current_epoch / warmup_epoch
    else:
        # Guard against division by zero when max_epoch == warmup_epoch.
        decay_span = max(1, max_epoch - warmup_epoch)
        lr = lr_min + 0.5 * (lr_max - lr_min) * (
            1.0 + cos(pi * (current_epoch - warmup_epoch) / decay_span)
        )
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    print("lr:", lr)



def main(args):
    """Run the full training loop for the image classifier.

    Expects ``args.image_root`` to contain ``train``/``val`` sub-folders in
    torchvision ``ImageFolder`` layout.  Trains with SGD + warmup/cosine lr,
    logs loss/accuracy to TensorBoard, prints a confusion matrix on the val
    set each epoch, and writes a per-epoch checkpoint plus ``best.pth`` under
    ``output/<output_name>/<timestamp>``.
    """
    current_time = time.localtime()
    savefolder = os.path.join("output/" + args.output_name, time.strftime("%Y_%m_%d_%H_%M_%S", current_time))
    os.makedirs(savefolder, exist_ok=True)

    # Preprocessing / augmentation pipelines (torchvision).
    from preproc import static_resize, augment_hsv
    train_transform = transforms.Compose([
                            augment_hsv(),
                            static_resize([256, 256]),
                            transforms.RandomResizedCrop(args.input_size),
                            transforms.RandomVerticalFlip(p=0.5),
                            transforms.RandomHorizontalFlip(p=0.5),
                            transforms.RandomAffine(degrees=30, scale=(0.8, 1.5)),
                            transforms.autoaugment.AutoAugment(policy=transforms.autoaugment.AutoAugmentPolicy('imagenet')),
                            transforms.ToTensor(),
                            transforms.Normalize(args.mean, args.std),
                            ])
    # Milder pipeline used for the final ``args.fade`` epochs only.
    fade_transform = transforms.Compose([
                            augment_hsv(),
                            static_resize([256, 256]),
                            transforms.RandomResizedCrop([224, 224]),
                            transforms.ToTensor(),
                            transforms.Normalize(args.mean, args.std),
                            ])

    val_transform = transforms.Compose([
                            static_resize(args.input_size),
                            transforms.ToTensor(),
                            transforms.Normalize(args.mean, args.std)
                            ])

    # Class-balanced sampling; ``shuffle`` must stay False when a sampler is given.
    trainset = torchvision.datasets.ImageFolder(root=args.image_root + '/train', transform=train_transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
                                              sampler=ImbalancedDatasetSampler(trainset),
                                              shuffle=False, num_workers=args.num_workers)

    fadeset = torchvision.datasets.ImageFolder(root=args.image_root + '/train', transform=fade_transform)
    # BUGFIX: the fade loader previously built its sampler from ``trainset``;
    # sample from ``fadeset`` so the sampler matches the dataset it indexes.
    fadeloader = torch.utils.data.DataLoader(fadeset, batch_size=args.batch_size,
                                             sampler=ImbalancedDatasetSampler(fadeset),
                                             shuffle=False, num_workers=args.num_workers)

    valset = torchvision.datasets.ImageFolder(root=args.image_root + '/val', transform=val_transform)
    valloader = torch.utils.data.DataLoader(valset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)

    # Show dataset sizes and the class -> index mappings.
    print("len(trainloader) = ", len(trainloader))
    print(trainset.class_to_idx)
    print("len(valloader) = ", len(valloader))
    print(valset.class_to_idx)

    # CPU or GPU.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # Build the network (alternatives kept for reference).
    # model = ShuffleNetV2(num_classes=2)
    # model = CNNFIVE(num_classes=2, init_weights=True)
    # model = VGG19(num_classes=2, init_weights=True)
    model = ghostnetv2(num_classes=args.num_classes,
                       width=1.6,
                       dropout=0.2)
    # model = vgg.vgg19_bn(num_classes=args.num_classes)

    def _load_weights(path):
        """Load all matching weights from ``path`` into ``model``; return the raw checkpoint."""
        ckpt = torch.load(path)
        # Checkpoints written below wrap the weights under the "model" key;
        # fall back to treating the file as a bare state_dict for older files.
        weights = ckpt.get("model", ckpt) if isinstance(ckpt, dict) else ckpt
        model_dict = model.state_dict()
        model_dict.update({k: v for k, v in weights.items() if k in model_dict})
        model.load_state_dict(model_dict)
        return ckpt

    if args.load_train:
        _load_weights(args.load_train)

    start_epoch = 0
    resume_optimizer_state = None
    if args.resume_train:
        # BUGFIX: the original filtered the checkpoint's *top-level* keys
        # ("start_epoch"/"model"/"optimizer") against the model state_dict, so
        # resuming restored no weights at all from files saved by this script.
        ckpt = _load_weights(args.resume_train)
        start_epoch = ckpt.get("start_epoch", 0)
        resume_optimizer_state = ckpt.get("optimizer")

    model.to(device)

    # Loss function and optimizer.
    criterion = nn.CrossEntropyLoss()
    # criterion = FocalLoss(class_num=args.num_classes, alpha=torch.tensor([0.2, 0.4, 0.4]), gamma=2)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.base_lr, momentum=0.9)
    if resume_optimizer_state is not None:
        # BUGFIX: restore momentum buffers / lr on resume; previously the saved
        # optimizer state was assigned to a variable and then discarded.
        optimizer.load_state_dict(resume_optimizer_state)

    tblogger = SummaryWriter(savefolder)  # TensorBoard logging

    # Per-epoch history: train accuracy, train loss, val accuracy.
    Accuracy = []
    Loss = []
    Val_Accuracy = []
    BEST_VAL_ACC = 0.

    since = time.time()
    print("start_epoch: ", start_epoch)
    for epoch in range(start_epoch, args.max_epoch):
        train_loss = 0.
        train_accuracy = 0.
        total = 0.
        model.train()
        adjust_learning_rate(optimizer, epoch, args.max_epoch, lr_min=args.lr_min, lr_max=args.lr_max, warmup=True)
        # Switch to the milder "fade" augmentation for the last ``args.fade`` epochs.
        if epoch >= args.max_epoch - args.fade:
            trainloader = fadeloader
        for i, data in enumerate(trainloader):
            images, labels = data
            images = images.to(device)
            labels = labels.to(device)
            # Standard forward / backward / step.
            optimizer.zero_grad()
            outs = model(images)
            loss = criterion(outs, labels)
            loss.backward()
            optimizer.step()

            batch_size = labels.size(0)
            total += batch_size
            run_loss = loss.item()  # CrossEntropyLoss is already batch-averaged
            _, prediction = torch.max(outs, 1)
            run_accuracy = (prediction == labels).sum().item()
            if i % 20 == 19:
                # BUGFIX: ``run_loss`` is already a per-sample mean; the
                # original divided it by the batch size a second time.
                print('|epoch: {}|, |iter: {}|, |lr: {:.4f}| train accuracy: {:.4f}% |loss: {:.4f}|'.format(
                    epoch + 1, i + 1, optimizer.param_groups[0]['lr'],
                    100 * run_accuracy / batch_size, run_loss))
            train_accuracy += run_accuracy
            # Weight by batch size so ``train_loss / total`` is a true per-sample mean
            # (the original summed batch means and divided by the sample count).
            train_loss += run_loss * batch_size
        Loss.append(train_loss / total)
        tblogger.add_scalar("train_loss", train_loss / total, epoch + 1)
        Accuracy.append(100 * train_accuracy / total)
        tblogger.add_scalar("train_accuracy", 100 * train_accuracy / total, epoch + 1)
        tblogger.add_scalar("lr", optimizer.param_groups[0]['lr'], epoch + 1)

        # ---- Validation ----
        acc = 0.
        model.eval()
        print('waitting for Val...')
        label_list = []
        prediction_list = []
        with torch.no_grad():
            accuracy = 0.
            total = 0
            for data in valloader:
                images, labels = data
                images = images.to(device)
                labels = labels.to(device)
                out = model(images)
                _, prediction = torch.max(out, 1)
                label_list.extend(list(np.array(labels.cpu())))
                prediction_list.extend(list(np.array(prediction.cpu())))
                total += labels.size(0)
                accuracy += (prediction == labels).sum().item()
            acc = 100. * accuracy / total
        tblogger.add_scalar("val_accuracy", acc, epoch + 1)
        print('epoch {}  The ValSet accuracy is {:.4f}% \n'.format(epoch + 1, acc))
        Val_Accuracy.append(acc)

        # Confusion matrix over all class indices (even those absent this epoch).
        cm = confusion_matrix(np.array(label_list), np.array(prediction_list),
                              labels=np.arange(0, args.num_classes).tolist())
        print('Confusion matrix\n\n', cm)

        # Checkpoint every epoch; keep the best-by-val-accuracy copy separately.
        ckpt_state = {
            "start_epoch": epoch + 1,
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
        }
        torch.save(ckpt_state, savefolder + f"/epoch_{epoch}.pth")
        if acc > BEST_VAL_ACC:
            print('Find Better Model and Saving it...')
            torch.save(ckpt_state, savefolder + "/best.pth")
            BEST_VAL_ACC = acc
            print('Saved!')

        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
        print('Now the best val Acc is {:.4f}%'.format(BEST_VAL_ACC))
if __name__ == "__main__":
    # Parse CLI options and launch training.
    cli_args = make_parser().parse_args()
    main(cli_args)
