import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import BatchSampler
import segmentation_models_pytorch as smp
# from utils.json import json_to_image
from utils.helper import get_subdirs, set_logger, GPUManager, del_useless_folders
from models.losses import BCEDiceLoss, DiceLoss, FocalLoss, BCEDiceFocalLoss
import os
import argparse
import pandas as pd
from pathlib import Path
import time
import torch.distributed as dist
# import numpy as np
# from sklearn.model_selection import train_test_split
from datasets import T2_Seg_Dataset
from datasets.transformations import multi_transforms_medium
from datasets.transformations import resize_transform_basic
import platform
import yaml
from utils.trainer import Seg_Trainer
from utils.trainer.distributed_utils import init_distributed_mode, setup_for_distributed, cleanup
from utils.helper import remove_dataparallel
# import nni
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import logging
logger = logging.getLogger('SegModel.Train')


def _str2bool(v):
    """Parse textual booleans so ``--flag False`` actually disables a flag.

    ``argparse`` with ``type=bool`` treats any non-empty string (including
    ``"False"``) as True; this converter interprets the common spellings.

    Raises:
        argparse.ArgumentTypeError: if *v* is not a recognized boolean string.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r.' % v)


def parse_args():
    """
    Build and return the argument parser for training a segmentation model.

    Returns:
        argparse.ArgumentParser: parser with all training hyper-parameters;
        caller is expected to invoke ``parse_args()`` on it.
    """
    parser = argparse.ArgumentParser(description='Train a segmentation model.')
    parser.add_argument('--name', default=None,
                        help='Name of this experiment: (default: arch+timestamp)')
    parser.add_argument("--batch_size", type=int, default=5, help="batch size of input")
    parser.add_argument("--epochs", type=int, default=300000, help="total_epoch")
    parser.add_argument("--img_height", type=int, default=1024, help="size of image height")
    parser.add_argument("--img_width", type=int, default=1024, help="size of image width")
    # NOTE: CLI values arrive as strings; main() converts mean/std to float.
    parser.add_argument("--mean", nargs='+', default=[0.485, 0.456, 0.406], help="Define the mean for image normalization.")
    parser.add_argument("--std", nargs='+', default=[0.229, 0.224, 0.225], help="Define the std for image normalization.")
    parser.add_argument("--lr", type=float, default=0.001, help="learning rate of the optimizer")
    # 'focal' added to choices: main() already handles args.loss == 'focal'.
    parser.add_argument("--loss", type=str, default='bcedicefocal', choices=['bce', 'dice', 'bcedice', 'focal', 'bcedicefocal'], help="Define loss function.")
    parser.add_argument("--dice_weight", type=float, default=0.5, help="When using loss of bcedice, then define dice_weight.")
    # Fixed missing comma: ['Unet', 'DeepLabV3Plus' 'PSPNet'] silently
    # concatenated to 'DeepLabV3PlusPSPNet', making PSPNet unselectable.
    parser.add_argument("--model", type=str, default='Unet', choices=['Unet', 'DeepLabV3Plus', 'PSPNet'], help="Define model name")
    parser.add_argument("--backbone", type=str, default='se_resnext50_32x4d', choices=['resnet50', 'se_resnext50_32x4d', 'mobilenet_v2', 
        'timm-mobilenetv3_small_100', 'resnet18', 'mobileone_s4', 'timm-mobilenetv3_large_100', 'mobileone_s2'], help="Define model name")
    parser.add_argument("--optimizer", type=str, default='adabelief', choices=['adam', 'sgd', 'radam', 'adabelief'], help="Define optimizer.")
    parser.add_argument("--weight_decay", type=float, default=1e-5, help="weight_decay")
    parser.add_argument("--condition", type=str, default='iou', help=".")
    parser.add_argument("--patience", type=int, default=150000, help=".")
    parser.add_argument("--scheduler_factor", type=float, default=0.4, help=".")
    parser.add_argument("--train_csv", type=str, default='datasets/csv/AA_Vtech/train_20250614.csv', help="Define the location of train csv file.")
    parser.add_argument("--val_csv", type=str, default='datasets/csv/AA_Vtech/val_20250614.csv', help="Define the data location.")
    parser.add_argument("--mask_label", type=str, default='box', help="Mask Label in json file.")
    parser.add_argument("--save_dir", type=str, default='ckpts/XXXXX', help="Define where to save model checkpoints.")
    # _str2bool replaces type=bool: bool('False') is True, so these flags
    # could never be disabled from the command line before.
    parser.add_argument("--use_gpu", type=_str2bool, default=True, help="Define gpu")
    parser.add_argument("--syncBN", type=_str2bool, default=True, help="Whether use syncBN")
    parser.add_argument("--pretrain", type=str, default=r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/code/adc_segmentation/ckpts/AA_T7/0527_0916_Unet_se_resnext50_32x4d/Unet_se_resnext50_32x4d_best_iou.pth', help="Define the location of pretrained weights.")
    parser.add_argument("--use_amp", type=_str2bool, default=False, help="Whether use amp")

    return parser


def main():
    """Entry point: set up (possibly distributed) training and run the trainer.

    Flow: parse CLI args -> init distributed backend -> create run directory
    and logger -> build datasets/dataloaders from CSVs -> build the smp model
    (optionally loading pretrained weights) -> select loss -> wrap in DDP for
    multi-GPU -> dump config -> train -> clean up.
    """
    # choose free gpus to train
    # gpu_list = ",".join([str(x) for x in GPUManager().auto_choice(gpu_num=args.gpu_num)])
    # os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_list)

    parser = parse_args()
    # tuner_params = nni.get_next_parameter()
    # parser.set_defaults(**tuner_params)
    args = parser.parse_args()

    # gpu_type encodes the run mode: 0 = CPU, 1 = single GPU, 2 = multi-GPU DDP.
    gpu_type = 0
    # NOTE(review): init_distributed_mode is assumed to populate
    # args.rank / args.gpu / args.world_size even for non-distributed runs,
    # since args.rank is read unconditionally below -- confirm.
    device = init_distributed_mode(args)
    if args.use_gpu and torch.cuda.is_available():
        if torch.cuda.device_count() > 1:
            # Silence logging/printing on all non-master ranks.
            setup_for_distributed(args.rank == 0)
            gpu_type = 2
            print('rank: {}, LOCAL_RANK:{}, WORLD_SIZE: {}'.format(args.rank, args.gpu, args.world_size))
        else:
            device = torch.device('cuda')
            gpu_type = 1
    else:
        device = torch.device('cpu')

    # make save dir
    os.makedirs(args.save_dir, exist_ok=True)

    # make sure that the mean and std are float list not str list
    args.mean = [float(x) for x in args.mean]
    args.std = [float(x) for x in args.std]

    # if args.rank == 0:
    #     del_useless_folders(args.save_dir, remain_exts=['.pth', '.tar', '.pt', '.pkl'])
    if gpu_type == 2:
        # Keep all ranks in sync before the run directory is created.
        dist.barrier()
    # Default experiment name: <MMDD_HHMM>_<model>_<backbone>.
    if args.name is None:
        args.name = '%s_%s_%s' % (
            time.strftime("%m%d_%H%M", time.localtime(time.time())), 
            args.model,
            args.backbone
            )
    out_dir = os.path.abspath(os.path.join(os.path.curdir, os.path.join(args.save_dir, args.name)))
    os.makedirs(out_dir, exist_ok=True)
    
    # Setup log file to store training information; the 'is.training' marker
    # file signals an in-progress run and is removed on completion.
    set_logger(os.path.join(out_dir, 'train.log'), 'SegModel')
    if args.rank == 0:
        Path(os.path.join(out_dir, 'is.training')).touch()
        logger.info(f'out_dir: {out_dir} .')

    # Load the train/val splits from CSV files.
    if args.rank == 0:
        logger.info(f'Loading the datasets train: {args.train_csv}, val: {args.val_csv} ...')

    df_train = pd.read_csv(args.train_csv)
    df_valid = pd.read_csv(args.val_csv)
    if args.rank == 0:
        logger.info(f'The train set shape is {df_train.shape} and the val set shape is {df_valid.shape}.')

    # Training uses heavy augmentation; validation only resizes/normalizes.
    train_dataset = T2_Seg_Dataset(df_train,
                                    multi_transforms_medium(img_size=(args.img_height,args.img_width),
                                                            mean=args.mean,
                                                            std=args.std),
                                    args.mask_label
                                    )
    valid_dataset = T2_Seg_Dataset(df_valid,
                                    resize_transform_basic(img_size=(args.img_height,args.img_width),
                                                            mean=args.mean,
                                                            std=args.std),
                                    args.mask_label
                                    )
    
    # Cap workers at min(cpu_count, batch_size, 20); batch_size==1 falls back
    # to 0 workers (in-process loading). pin_memory only helps on CUDA/Linux.
    kwargs = {'num_workers': min([os.cpu_count(), args.batch_size if args.batch_size > 1 else 0, 20]),
                'pin_memory': True} if (torch.cuda.is_available() and platform.system() == 'Linux') else {}
    if gpu_type == 2:
        train_sampler = DistributedSampler(train_dataset)
        val_sampler = DistributedSampler(valid_dataset)

        train_batch_sampler = BatchSampler(train_sampler, args.batch_size, drop_last=False)

        train_loader = DataLoader(train_dataset, batch_sampler=train_batch_sampler, **kwargs)
        val_loader = DataLoader(valid_dataset, sampler=val_sampler, batch_size=2*args.batch_size, **kwargs)
    else:
        train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = DataLoader(valid_dataset, batch_size=2*args.batch_size, shuffle=False, **kwargs)

    dataloaders = {'train':train_loader, 'val':val_loader}
    
    # Instantiate the smp architecture by name (e.g. smp.Unet), binary output.
    model = getattr(smp, args.model)(args.backbone, encoder_weights='imagenet', classes=1, activation=None)

    if args.rank == 0:
        logger.info('Model %s has been built.' %args.model)

    if args.pretrain is not None and os.path.isfile(args.pretrain) and os.path.exists(args.pretrain):
        if args.rank == 0:
            logger.info(f'use pretrain model:  {args.pretrain} .')
        params = torch.load(args.pretrain, map_location='cpu')
        # Checkpoints may be a raw state_dict or a dict wrapping it under
        # 'state_dict'. Unwrap only in the latter case instead of the previous
        # bare `except:` which silently swallowed every exception.
        if isinstance(params, dict) and 'state_dict' in params:
            params = params['state_dict']
        if args.rank == 0:
            logger.info('Pretrained from %s has been loaded.' %args.pretrain)
        # remove_dataparallel strips any 'module.' prefixes from DDP-saved keys.
        model.load_state_dict(remove_dataparallel(params))

    if args.loss == 'bce':
        criterion = nn.BCEWithLogitsLoss()
    elif args.loss == 'dice':
        criterion = DiceLoss()
    elif args.loss == 'bcedice':
        criterion = BCEDiceLoss(bce_weight=1-args.dice_weight, dice_weight=args.dice_weight)
    elif args.loss == 'focal':
        criterion = FocalLoss(alpha=0.75)
    elif args.loss == 'bcedicefocal':
        criterion = BCEDiceFocalLoss(bce_weight=1-args.dice_weight, dice_weight=args.dice_weight)

    if args.rank == 0:
        logger.info('Using %s as loss function.' %args.loss)

    if gpu_type == 2:
        if args.syncBN:
            # Converting BatchNorm to SyncBatchNorm makes training slower but
            # synchronizes batch statistics across GPUs.
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        if args.rank == 0:
            logger.info('Using multi GPU.')
    elif gpu_type == 1:
        model = model.to(device)
        criterion = criterion.to(device)
        logger.info('Using single GPU.')
    
    if args.rank == 0:
        # Dump the plain dict: the default YAML dumper cannot represent an
        # argparse.Namespace object (yaml.dump(args, f) raises RepresenterError).
        with open(os.path.join(out_dir, 'config.yaml'), 'w') as f:
            yaml.dump(vars(args), f)

    model_trainer = Seg_Trainer(dataloaders, model, device, gpu_type, criterion, out_dir, args)
    model_trainer.start()
    
    # Remove the in-progress marker now that training finished.
    if args.rank == 0:
        os.remove(os.path.join(out_dir, 'is.training'))
    
    if gpu_type == 2:
        dist.barrier()
        cleanup()

# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
