import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import BatchSampler
import torch.distributed as dist
# from torch.cuda.amp import autocast, GradScaler
from utils.helper import get_subdirs, set_logger, GPUManager, del_useless_folders
import os
import sys
import signal
import argparse
import pandas as pd
import time
from pathlib import Path
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_class_weight
from datasets import T7_Box_Dataset
from datasets.transformations import multi_transforms_medium, multi_transforms_medium_2, multi_transforms_medium_4
from datasets.transformations import normalize_only
import platform
import yaml
import json
from utils.trainer import Cls_Trainer
from utils.helper import remove_dataparallel
from utils.trainer.distributed_utils import init_distributed_mode, setup_for_distributed, cleanup
import models as cls_models
from timm import create_model
from models.losses import FocalLoss, CELoss, GHMCLoss, WeightedCrossEntropyLoss, CrossEntropyLossOneHot
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import logging
logger = logging.getLogger('ClsModel.Train')


def parse_args():
    """
    Set args parameters
    """
    parser = argparse.ArgumentParser(description='Train a segmentation model.')
    parser.add_argument('--name', default=None,
                        help='Name of this experiment: (default: arch+timestamp)')
    parser.add_argument("--batch_size", type=int, default=84, help="batch size of input")
    parser.add_argument("--epochs",  nargs='+', default=[100, 20], help="total_epoch")
    parser.add_argument("--freeze_type",  nargs='+', default=['0', '0'], help="total_epoch")
    parser.add_argument("--img_height", type=int, default=299, help="size of image height")
    parser.add_argument("--img_width", type=int, default=299, help="size of image width")
    parser.add_argument("--mean", nargs='+', default=[0.485, 0.456, 0.406], help="Define the mean for image normalization.")
    parser.add_argument("--std", nargs='+', default=[0.229, 0.224, 0.225], help="Define the std for image normalization.")
    parser.add_argument("--lr", nargs='+', default=[0.001, 0.0004], help="learning rate of the optimizer")
    parser.add_argument("--weight_decay", type=float, default=1e-4, help="weight_decay")
    parser.add_argument("--loss_fn", type=str, default='crossentropylossonehot', choices=['celoss', 'focalloss', 'ghmc', 'weightedcrossentropyloss', 'crossentropylossonehot'], help="Define loss function.")
    parser.add_argument("--label_smooth", type=bool, default=False, help="Whether to use label smooth or not.")
    parser.add_argument("--model", type=str, default='densenet161', \
        choices=['resnet18', 'se_resnext50',  'se_resnext101', 'mobilenet_v2', 'mobilenetv2_140.ra_in1k', \
        'densenet161', 'densenet121', 'densenet169', 'densenet201', 'caformer_m36.sail_in22k_ft_in1k_384', \
        'convformer_s36.sail_in22k_ft_in1k_384', 'efficientnet_b5.sw_in12k_ft_in1k', \
        'inception_next_base.sail_in1k_384', 'tf_efficientnet_b7.ap_in1k', 'deit3_small_patch16_384.fb_in22k_ft_in1k',
        'volo_d2_224.sail_in1k', 'legacy_seresnext101_32x4d.in1k', 
        'legacy_seresnext50_32x4d.in1k', 'tiny_vit_21m_512.dist_in22k_ft_in1k', 
        'seresnextaa101d_32x8d.ah_in1k', 'regnetz_e8.ra3_in1k', 'eva02_base_patch14_448.mim_in22k_ft_in22k_in1k'], help="Define model name")
    parser.add_argument("--optimizer", nargs='+', default=['adabelief', 'sgd'], choices=['adam', 'sgd', 'radam', 'adabelief', 'rangerlars', 'rangerqh'], help="Define optimizer.")
    parser.add_argument("--betas_for_optim", nargs='+', default=[0.9, 0.999], help="Define optimizer param betas_for_optim.")
    parser.add_argument("--eps_for_optim", type=float, default=1e-06, help="Define optimizer param eps_for_optim.")
    parser.add_argument("--scheduler_factor", type=float, default=0.4, help="")
    parser.add_argument("--scheduler_patience", type=int, default=8, help="")

    parser.add_argument("--train_csv", type=str, default='datasets/csv/AA_T7_3890/train_20250409.csv', help="Define the location of train csv file.")
    parser.add_argument("--val_csv", type=str, default='datasets/csv/AA_T7_3890/val_20250409.csv', help="Define the data location.")
    parser.add_argument("--save_dir", type=str, default='ckpts/AA_T7_3890', help="Define where to save model checkpoints.")
    
    parser.add_argument("--use_mixup", type=int, default=0, help=".")
    parser.add_argument("--mixup_alpha", type=float, default=0.5, help="weight_decay")
    parser.add_argument("--cutmix_alpha", type=float, default=0.0, help="weight_decay")
    parser.add_argument("--mix_prob", type=float, default=1.0, help="weight_decay")
    
    parser.add_argument("--condition", type=str, default='acc', help=".")
    parser.add_argument("--earlystop_patience", type=int, default=15000, help=".")
    parser.add_argument("--key_code_pr_path", type=str, default=None, help=".")
    parser.add_argument("--key_code_weight", type=int, default=5, help=".")
    
    parser.add_argument("--use_gpu", type=bool, default=True, help="Define gpu")
    parser.add_argument("--syncBN", type=bool, default=True, help="Wether use syncBN")    
    parser.add_argument("--use_adc_pretrain", type=int, default=1, help="Define the location of pretrained weights.")
    parser.add_argument("--adc_pretrain_model_path", type=str, default=r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/code/adc_classification/ckpts/AA_T7/1231_143533_densenet161/densenet161_best_acc.pth', help="Define the location of pretrained weights.")
    parser.add_argument("--seed", type=int, default=615, help="")

    return parser

def main():
    """End-to-end training entry point.

    Workflow: parse CLI args -> set up device / (optionally distributed)
    GPU state -> load train/val CSVs into datasets and dataloaders ->
    build the model (project models or timm) -> optionally load ADC
    pretrained weights -> select a loss function -> run one Cls_Trainer
    stage per entry in ``args.lr``.
    """
    parser = parse_args()
    args = parser.parse_args()

    # gpu_type encodes the execution mode:
    #   0 = CPU, 1 = single GPU, 2 = multi-GPU distributed (DDP).
    gpu_type = 0
    # NOTE(review): init_distributed_mode is assumed to populate
    # args.rank / args.gpu / args.world_size; args.rank is read below
    # even on the CPU path -- confirm it is always set.
    device = init_distributed_mode(args)
    if args.use_gpu and torch.cuda.is_available():
        # Let cuDNN benchmark conv algorithms (inputs have a fixed size).
        torch.backends.cudnn.benchmark=True
        if torch.cuda.device_count() > 1:
            # Suppress printing on all ranks except rank 0.
            setup_for_distributed(args.rank == 0)
            gpu_type = 2
            print('rank: {}, LOCAL_RANK:{}, WORLD_SIZE: {}.'.format(args.rank, args.gpu, args.world_size))
        else:
            device = torch.device('cuda')
            gpu_type = 1
    else:
        device = torch.device('cpu')

    os.makedirs(args.save_dir, exist_ok=True)

    # make sure that the mean and std are float list not str list
    args.mean = [float(x) for x in args.mean]
    args.std = [float(x) for x in args.std]

    if gpu_type == 2:
        # Sync all ranks before deriving the (timestamped) experiment name.
        dist.barrier()
    # clean up the save dir, delete useless dirs and make dir to store ckpts
    # Default experiment name: timestamp + model architecture.
    if args.name is None:
        args.name = '%s_%s' % (
            time.strftime("%m%d_%H%M%S", time.localtime(time.time())),
            args.model
            )
    out_dir = os.path.abspath(os.path.join(os.path.curdir, os.path.join(args.save_dir, args.name)))
    os.makedirs(out_dir, exist_ok=True)

    # Setup log file to store training information
    set_logger(os.path.join(out_dir, 'train.log'), 'ClsModel')
    if args.rank == 0:
        # 'is.training' is a marker file signalling an in-progress run;
        # it is removed on normal completion or in the signal handler below.
        Path(os.path.join(out_dir, 'is.training')).touch()
        logger.info(f'out_dir: {out_dir} .')

    # Load the train/val CSVs and derive the code <-> label-index mappings.
    if args.rank == 0:
        logger.info(f'Loading the datasets train: {args.train_csv}, val: {args.val_csv} ...')

    df_train = pd.read_csv(args.train_csv)
    df_valid = pd.read_csv(args.val_csv)
    # Per-class sample counts of the 'code' column; sorting the codes makes
    # the code -> label-index mapping deterministic across runs.
    code_count = dict(zip(*np.unique(df_train["code"].values.tolist(), return_counts=True)))
    code_list = sorted(list(code_count.keys()))

    code2label = {}
    label2code = {}
    for idx, code in enumerate(code_list):
        code2label[code] = idx
        label2code[idx] = code

    if args.rank == 0:
        logger.info(f'Code num for training is {len(code_list)}.')
        logger.info(f'Code list used for training is  {code_list}.')
    class_num = len(code_list)
    # Keep only rows whose code appears in the training set's code list
    # (drops validation codes never seen in training).
    df_train = df_train.loc[df_train["code"].isin(code_list)].copy().reset_index(drop=True)
    df_valid = df_valid.loc[df_valid["code"].isin(code_list)].copy().reset_index(drop=True)
    label_list = df_train["code"].tolist()
    label_list = [code2label[x] for x in label_list]

    if args.rank == 0:
        logger.info(f'The train set shape is {df_train.shape} and the val set shape is {df_valid.shape}.')

    # Training set uses the augmenting transform; validation only normalizes.
    train_dataset = T7_Box_Dataset(dataframe=df_train, 
                                    code2label=code2label,
                                    img_size=(args.img_height,args.img_width),
                                    transform=multi_transforms_medium_2(img_size=(args.img_height,args.img_width),
                                                            mean=args.mean,
                                                            std=args.std,
                                                            padding=16)
                                    )
    valid_dataset = T7_Box_Dataset(dataframe=df_valid,
                                    code2label=code2label,
                                    img_size=(args.img_height,args.img_width),
                                    transform=normalize_only(img_size=(args.img_height,args.img_width),
                                                    mean=args.mean,
                                                    std=args.std)
                                    )

    # DataLoader workers: capped by CPU count, batch size, and 20; only
    # used (with pinned memory) on Linux + CUDA.
    kwargs = {'num_workers': min([os.cpu_count(), args.batch_size if args.batch_size > 1 else 0, 20]), 
              'pin_memory': True} if (torch.cuda.is_available() and platform.system() == 'Linux') else {}

    if gpu_type == 2:
        # DDP: each rank sees a distinct shard of the data.
        train_sampler = DistributedSampler(train_dataset)
        val_sampler = DistributedSampler(valid_dataset)    

        train_batch_sampler = BatchSampler(train_sampler, args.batch_size, drop_last=False)

        train_loader = DataLoader(train_dataset, batch_sampler=train_batch_sampler, **kwargs)
        # Validation can afford a larger batch (no gradients).
        val_loader = DataLoader(valid_dataset, sampler=val_sampler, batch_size=2*args.batch_size, **kwargs)
    else:
        train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
        # NOTE(review): single-GPU validation batch size is hard-coded to 90
        # (the DDP path uses 2*batch_size) -- confirm this is intentional.
        val_loader = DataLoader(valid_dataset, batch_size=90, shuffle=False, **kwargs)

    dataloaders = {'train':train_loader, 'val':val_loader}
    # Legacy architectures come from the project's models package; everything
    # else is built through timm.create_model.
    if args.model in ['resnet18', 'se_resnext50',  'se_resnext101', 'mobilenet_v2', 'densenet161', \
        'densenet121', 'densenet169', 'densenet201']:
        model = getattr(cls_models, args.model)(num_classes=class_num, pretrained='imagenet')  # input_channels=3,
    else:
        model = create_model(model_name=args.model, pretrained=True, num_classes=class_num)

    if args.rank == 0:
        logger.info('Model %s has been built.' %args.model)

    if args.use_adc_pretrain and args.adc_pretrain_model_path is not None and os.path.exists(args.adc_pretrain_model_path):
        if args.rank == 0:
            logger.info('use_adc_pretrain, adc_pretrain_model_path: %s .' %args.adc_pretrain_model_path)
        params = torch.load(args.adc_pretrain_model_path, map_location='cpu')
        # NOTE(review): bare except -- checkpoints without a 'state_dict'
        # key fall through and are used as-is; any other error is hidden too.
        try:
            params = params["state_dict"]
        except:
            params = params

        # Strip the classifier head weights (class count differs) before
        # loading; the head keys are architecture-specific.
        if (args.model == 'densenet161' or args.model == 'densenet169' or args.model == 'densenet201'):
            # pretrained_dict = {k: v for k, v in remove_dataparallel(params).items()}
            pretrained_dict = {k: v for k, v in remove_dataparallel(params).items() if k not in ['1.8.weight', '1.8.bias']}
        elif args.model == 'legacy_seresnext50_32x4d.in1k':
            pretrained_dict = {k: v for k, v in remove_dataparallel(params).items() if k not in ['last_linear.weight', 'last_linear.bias']}
        elif args.model in ['tiny_vit_21m_512.dist_in22k_ft_in1k', 'regnetz_e8.ra3_in1k']:
            pretrained_dict = {k: v for k, v in remove_dataparallel(params).items() if k not in ['head.fc.weight', 'head.fc.bias']}
        elif args.model == 'seresnextaa101d_32x8d.ah_in1k':
            pretrained_dict = {k: v for k, v in remove_dataparallel(params).items() if k not in ['fc.weight', 'fc.bias']}
        elif args.model == 'eva02_base_patch14_448.mim_in22k_ft_in22k_in1k':
            pretrained_dict = {k: v for k, v in remove_dataparallel(params).items() if k not in ['head.weight', 'head.bias']}

        # NOTE(review): if args.model matches none of the branches above
        # (e.g. densenet121 with use_adc_pretrain=1), pretrained_dict is
        # unbound here and this line raises NameError -- confirm.
        model.load_state_dict(pretrained_dict, strict=False)
        if args.rank == 0:
            logger.info('Pretrained from %s has been loaded.' %args.adc_pretrain_model_path)

    # Ensure every parameter is trainable before the staged training loop.
    for p in model.parameters():
        p.requires_grad = True

    # Loss selection; label_smooth only affects 'celoss' and 'focalloss'.
    if args.loss_fn.lower() == 'celoss' and args.label_smooth:
        criterion = CELoss(class_num=class_num, smooth=0.01).cuda()
    elif args.loss_fn.lower() == 'focalloss' and args.label_smooth:
        criterion = FocalLoss(class_num=class_num, smooth=0.03).cuda()
    elif (args.loss_fn.lower() == 'celoss') and (not args.label_smooth):
        criterion = nn.CrossEntropyLoss().cuda()
    elif (args.loss_fn.lower() == 'focalloss') and (not args.label_smooth):
        criterion = FocalLoss(class_num=class_num).cuda()
    elif args.loss_fn.lower() == 'ghmc':
        # Balance classes by inverse frequency of the training labels.
        class_weight = compute_class_weight(class_weight='balanced', classes=np.arange(class_num), y=label_list)
        criterion = GHMCLoss(class_num=class_num, class_weight=class_weight).cuda()
    elif args.loss_fn.lower() == 'weightedcrossentropyloss':
        # Codes listed in the key_code_pr JSON get weight key_code_weight;
        # everything else stays at 1. Any failure (missing/None path, bad
        # JSON) falls back to uniform weights.
        # NOTE(review): os.path.isfile(None) raises TypeError, which is
        # swallowed by the bare except below -- confirm that is intended.
        try:
            if os.path.isfile(args.key_code_pr_path) and os.path.exists(args.key_code_pr_path):
                with open(args.key_code_pr_path, 'r') as f:
                    key_code_pr = json.load(f)
                class_weight = [1] * len(label2code)
                for i in range(len(label2code)):
                    if label2code[i] in key_code_pr:
                        class_weight[i] = args.key_code_weight
            else:
                class_weight = None
        except:
            class_weight = None
        criterion = WeightedCrossEntropyLoss(class_num=class_num, class_weight=class_weight).cuda()
    elif args.loss_fn.lower() == 'crossentropylossonehot':
        criterion = CrossEntropyLossOneHot().cuda()

    if gpu_type == 2:
        if args.syncBN:
            # Training is slower with SyncBatchNorm enabled.
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        if args.rank == 0:
            logger.info('Using multi GPU.')
    elif gpu_type == 1:
        model = model.cuda()
        # criterion = criterion.cuda()
        logger.info('Using single GPU.')

    # Persist the full run configuration alongside the checkpoints.
    if args.rank == 0:
        with open(os.path.join(out_dir, 'config.yaml'), 'w') as f:
            yaml.dump(args, f)

    # Tell Python to run the handler() function when SIGINT is received
    def handler(signal_received, frame):
        # Handle any cleanup here: drop the 'is.training' marker so the
        # run is not mistaken for an active one.
        if os.path.exists(os.path.join(out_dir, 'is.training')):
            os.remove(os.path.join(out_dir, 'is.training'))
        print('SIGINT or CTRL-C detected. Exiting training.')
        sys.exit(0)

    signal.signal(signal.SIGINT, handler) # ctrl + c
    signal.signal(signal.SIGTSTP, handler) # ctrl + z
    if args.rank == 0:
        print('Training. Press CTRL-C to exit.')

    # Staged training: one Cls_Trainer run per learning rate; the best
    # metric so far (best_indics) carries over between stages.
    best_indics = 0
    for index, lr in enumerate(args.lr):
        # If epochs and lr lists have different lengths, reuse epochs[0].
        if len(args.epochs) == len(args.lr):
            epoch = args.epochs[index]
        else:
            epoch = args.epochs[0]

        # model_freeze(model, args.freeze_type[index])
        # if gpu_type == 2:
        #     dist.barrier()

        # Frozen stages ('body'/'head') effectively disable early stopping
        # and switch to a cosine schedule.
        earlystop_patience = args.earlystop_patience if args.freeze_type[index] == '0' else 10000
        lr_scheduler_name = 'ReduceLROnPlateau' if args.freeze_type[index] == '0' else 'CosineAnnealingLR'

        model_trainer = Cls_Trainer(dataloaders=dataloaders, model=model, device=device, gpu_type=gpu_type, optimizer=args.optimizer[index], criterion=criterion, out_dir=out_dir, ymap=label2code, class_num=class_num, \
                                    epoch=epoch, lr=lr, earlystop_patience=earlystop_patience, lr_scheduler_name=lr_scheduler_name, init_indics=best_indics, args=args)
        best_indics = model_trainer.start()
        if gpu_type == 2:
            dist.barrier()

    # Normal completion: remove the in-progress marker.
    if args.rank == 0:
        os.remove(os.path.join(out_dir, 'is.training'))

    if gpu_type == 2:
        dist.barrier()
        cleanup()

def model_freeze(model, freeze_type):
    """Set requires_grad on the model's top-level children per freeze_type.

    All parameters are first made trainable; then, depending on
    ``freeze_type``: 'body' freezes every child except the last,
    'head' freezes only the last child, and '0' (or any unknown value)
    leaves the whole model trainable.
    """
    children = list(model.children())
    # Reset: make the entire model trainable first.
    for child in children:
        for _, param in child.named_parameters():
            param.requires_grad = True

    if freeze_type == 'body':
        to_freeze = children[:-1]
    elif freeze_type == 'head':
        to_freeze = children[-1:]
    else:
        # '0' and unrecognized values: nothing to freeze.
        return

    for child in to_freeze:
        for _, param in child.named_parameters():
            param.requires_grad = False

# Script entry point: run training when executed directly.
if __name__ == "__main__":
    main()
