import time
import warnings
import logging
import os, sys, math
import argparse

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch import optim

import zipfile
import torchvision
from torchvision import models
from torchvision.datasets import ImageFolder
from torchvision import transforms

# Index of the CUDA device whose properties are printed at startup in main().
DEFAULT_CUDA_ID = 0

def sys_update_trainning_info(epoch, total_epoch, train_loss, val_loss, trainning_time, val_time, mem, word_L):
    """Render a one-line, carriage-return training progress display on stdout.

    Each field is centered in a column of width ``word_L`` and the line ends
    with ``'\\r'`` so the next call overwrites it in place.

    Args:
        epoch: Current epoch number (shown as ``epoch/total_epoch``).
        total_epoch: Total number of epochs.
        train_loss: Running average training loss.
        val_loss: Running average validation loss.
        trainning_time: Elapsed training time in seconds (accepted for
            interface compatibility; not currently displayed).
        val_time: Elapsed validation time in seconds (accepted for
            interface compatibility; not currently displayed).
        mem: Reserved GPU memory in gigabytes.
        word_L: Column width used to center each field.
    """
    sys.stdout.write(
        '{}/{}'.format(epoch, total_epoch).center(word_L) +
        '{:1.5f}'.format(train_loss).center(word_L) +
        '{:1.5f}'.format(val_loss).center(word_L) +
        f'{mem:.3g}G'.center(word_L) +
        '\r'
    )
    # BUG FIX: flush AFTER writing so the freshly written progress line is
    # actually pushed to the terminal (the original flushed before the write,
    # which could leave the latest update stuck in the buffer).
    sys.stdout.flush()

def train_one_epoch(model: torch.nn.Module, criterion: nn.Module,
                    dataloader_train, dataloader_val,
                    optimizer: torch.optim.Optimizer,
                    device: torch.device,
                    epoch: int,
                    total_epoch: int) -> float:
    """Train ``model`` on every batch of ``dataloader_train`` for one epoch.

    Progress (average loss, reserved GPU memory) is printed in place via
    ``sys_update_trainning_info`` after each batch.

    Args:
        model: Network to train; switched to train mode here.
        criterion: Loss module applied to ``(prediction, target)``.
        dataloader_train: Iterable of ``(image_batch, label_batch)`` pairs.
        dataloader_val: Accepted for interface compatibility; not used here.
        optimizer: Optimizer stepped once per batch.
        device: Device the batches are moved to.
        epoch: Current epoch index (display only).
        total_epoch: Total epoch count (display only).

    Returns:
        The average loss over the whole epoch (0.0 if the loader is empty).
    """
    model.train()
    total_loss = 0.0
    avg_loss = 0.0
    t0 = time.time()
    val_t = 0  # validation is not timed here; placeholder for the display
    for iteration, (img, annot) in enumerate(dataloader_train):
        optimizer.zero_grad()
        pred = model(img.to(device).float())
        loss_value = criterion(pred, annot.to(device))
        loss_value.backward()
        # Clip gradients to stabilise training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)
        optimizer.step()

        total_loss += loss_value.item()
        avg_loss = total_loss / (iteration + 1)
        mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0
        sys_update_trainning_info(epoch, total_epoch, avg_loss, 0.0, time.time() - t0, val_t, mem, 20)
    # BUG FIX: the original `return` was indented inside the loop, so only the
    # FIRST batch was ever trained each epoch. Return after the full pass.
    return avg_loss

def main(args=None):
    """Parse CLI options, build the dataset, model and optimizer, then train.

    Args:
        args: Optional list of CLI tokens forwarded to ``argparse``
            (defaults to ``sys.argv[1:]``).

    Raises:
        ValueError: If ``--dataset`` names an unsupported dataset type.
    """

    def str2bool(value):
        # BUG FIX: argparse `type=bool` treats ANY non-empty string (including
        # the string "False") as True. Parse the usual textual spellings so
        # e.g. `--amp False` actually disables AMP. Defaults are unchanged.
        if isinstance(value, bool):
            return value
        return str(value).strip().lower() in ('1', 'true', 't', 'yes', 'y')

    # Argument definitions
    parser = argparse.ArgumentParser(description='Simple training script.')
    # Dataset options
    parser.add_argument('--dataset',        type=str,   default='folder',           help='Dataset type')
    parser.add_argument('--dataset_path',   type=str,   default='../dataset/catsdogs',
                                                                                    help='Path to COCO directory')
    parser.add_argument('--mixup_enable',   type=str2bool, default=True,            help='if enable mixup augmentation')
    parser.add_argument('--mixup_prob',     type=float, default=0.3,                help='probability of applying mixup or cutmix per batch or element')
    parser.add_argument('--mixup_mode',     type=str,   default='batch',            help='how to apply mixup/cutmix params (per batch, pair (pair of elements), elem (element)')
    parser.add_argument('--mc_switch_prob', type=float, default=0.5,                help='prob to switch mixup and cutmix')
    # mixup/cutmix mixing ratios are drawn from a Beta distribution
    parser.add_argument('--mixup_beta',     type=float, default=1.0,                help='mixup beta to generate mixup beta, mixup is active if > 0')
    parser.add_argument('--cutmix_beta',    type=float, default=1.0,                help='cutmix beta to generate mixup beta, cutmix is active if > 0')
    parser.add_argument('--label_smoothing',type=float, default=0.1,                help='label smoothing')
    parser.add_argument('--input_shape',    type=int,   default=64,                 help='input_shape')
    parser.add_argument('--zip_path',       type=str,   default='',
                                                                                    help='Path to COCO directory')
    parser.add_argument('--num_workers',    type=int,   default=8,                  help='num of fretch data threads')
    # Training options
    parser.add_argument('--device',         type=str,   default='cuda',             help='device to use for training / testing')
    parser.add_argument('--epochs',         type=int,   default=50,                 help='Number of epochs')
    parser.add_argument('--warm_up_epochs', type=int,   default=5,                  help='warm up epoch')
    parser.add_argument('--warm_up_init',   type=float, default=5e-5,               help='warm up init learning rate')
    parser.add_argument('--warm_up_prefix', type=str2bool, default=True,            help='warm up prefix')
    # whether to train with automatic mixed precision
    parser.add_argument('--amp',            type=str2bool, default=False,           help='trainning with amp, True or False')
    parser.add_argument('--lr',             type=float, default=4e-3,               help='learning rate')
    parser.add_argument('--min_lr',         type=float, default=1e-5,               help='minum learning rate')
    parser.add_argument('--batch_size',     type=int,   default=32,                 help='batch size')
    parser.add_argument('--optimizer_type', type=str,   default='adamw',            help='the optimizer type, sgd or adam')
    parser.add_argument('--weight_decay',   type=float, default=0.0005,             help='weight decay')
    parser.add_argument('--momentum',       type=float, default=0.937)
    parser.add_argument('--log_file',       type=str,   default='weights/ghost_net/loss_log.txt',
                                                                                    help='trainning log', )
    # > 0 freezes the backbone; it is unfrozen after epoch * frozen_weights
    parser.add_argument('--frozen_weights', type=float, default=0,                  help="if frozen weight, if set, the backbone will be unfreezed in epoch * frozen_weights")
    # Weight loading
    parser.add_argument('--resume',         type=str,   default='',
                                                                                    help='resume from checkpoint')
    parser.add_argument('--weight',         type=str,   default='',
                                                                                    help='weigth path of model')
    parser.add_argument('--start_epoch',    type=int,   default=0,  metavar='N',    help='start epoch')
    # Output directory
    parser.add_argument('--save_dir',       type=str,   default='weights/ghost_net',
                                                                                    help='path to save weights')
    parser.add_argument('--save_epoch',     type=int,   default=1,                  help='save epoch')

    opts = parser.parse_args(args)

    # Device selection: fall back to CPU unless CUDA was requested AND is available.
    device = torch.device('cpu')
    if opts.device == 'cuda' and torch.cuda.is_available():
        print(torch.cuda.get_device_properties(DEFAULT_CUDA_ID))
        device = torch.device('cuda')

    # Prefer the zip archive; to train on local data, leave --zip_path empty.
    if opts.zip_path:
        # BUG FIX: close the archive deterministically even if extraction fails.
        with zipfile.ZipFile(opts.zip_path) as zip_file:
            dataset_path = zip_file.namelist()[0]
            zip_file.extractall('./')
    else:
        dataset_path = opts.dataset_path

    normalize = transforms.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5])
    transform_normal = transforms.Compose([
        transforms.Resize([opts.input_shape, opts.input_shape]),
        transforms.ToTensor(),  # convert to tensor, scaled to [0, 1]
        normalize
    ])

    if opts.dataset == 'folder':
        dataset_train = ImageFolder(os.path.join(dataset_path, 'train'), transform=transform_normal)
        dataset_val = ImageFolder(os.path.join(dataset_path, 'val'), transform=transform_normal)
        num_classes = len(dataset_train.classes)
        print('Load dataset from {} with {} classes, {} labels, batch_size={}'.format(dataset_path, len(dataset_train.classes), len(dataset_train), opts.batch_size))
    else:
        # BUG FIX: the original fell through to an obscure NameError here.
        raise ValueError('Unsupported dataset type: {}'.format(opts.dataset))

    dataloader_train = DataLoader(dataset_train, num_workers=opts.num_workers, batch_size=opts.batch_size, shuffle=True)
    # BUG FIX: ensure dataloader_val is always bound (was a NameError when
    # dataset_val did not exist).
    dataloader_val = None
    if dataset_val is not None:
        dataloader_val = DataLoader(dataset_val, num_workers=opts.num_workers, batch_size=opts.batch_size, shuffle=True)

    model = models.resnet34(pretrained=False)
    num_ftrs = model.fc.in_features
    # NOTE(review): a Sigmoid head feeding CrossEntropyLoss is unusual —
    # CrossEntropyLoss expects raw logits. Kept as-is to preserve behavior;
    # confirm whether the Sigmoid is intentional.
    model.fc = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid()
        )

    model.to(device)
    # BUG FIX: setting the `.training` attribute directly does not propagate
    # to submodules (e.g. BatchNorm); use the train() method instead.
    model.train()
    compute_loss = nn.CrossEntropyLoss()
    weight_decay = opts.weight_decay  # L2 regularisation term, guards against overfitting
    momentum = opts.momentum
    lr = opts.lr
    # BUG FIX: the original dict eagerly CONSTRUCTED all three optimizers just
    # to pick one; build lazily so only the requested optimizer is created.
    optimizer_builders = {
        'adam': lambda: optim.Adam(model.parameters(), lr=lr, betas=(momentum, 0.999), weight_decay=weight_decay),
        'sgd': lambda: optim.SGD(model.parameters(), lr=lr, momentum=momentum, nesterov=True, weight_decay=weight_decay),
        'adamw': lambda: optim.AdamW(model.parameters(), eps=1e-8, betas=(0.9, 0.999), lr=lr, weight_decay=weight_decay),
    }
    optimizer = optimizer_builders[opts.optimizer_type]()
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=opts.epochs + 5)

    for epoch_num in range(opts.epochs):
        epoch_loss = train_one_epoch(model, compute_loss, dataloader_train, dataloader_val, optimizer, device, epoch_num, opts.epochs)
        # BUG FIX: the scheduler was created but never stepped, so the
        # learning rate never followed the cosine annealing schedule.
        lr_scheduler.step()

    # Make sure the target directory exists before saving the final weights.
    os.makedirs(opts.save_dir, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(opts.save_dir, 'model_final_state_dict.pt'))

if __name__ == '__main__':
    main()