import logging
import os
import os.path as osp
import shutil
from typing import List, Optional

import torch
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR

def getLogger(logger_name,
              fmt = "[%(name)s] %(levelname)s %(message)s",
              level = logging.INFO):
    """Return a logger with a console (stderr) handler attached.

    Args:
        logger_name: name passed to ``logging.getLogger``.
        fmt: ``logging.Formatter`` format string for the console handler.
        level: logging level set on the returned logger.

    Returns:
        The configured ``logging.Logger`` instance.
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    # logging.getLogger caches loggers by name, so the original version
    # attached one more StreamHandler on every call and duplicated each
    # record.  Only attach a handler the first time this name is seen.
    if not logger.handlers:
        formatter = logging.Formatter(fmt)  # renamed: don't shadow the fmt parameter
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)

    return logger


logger = getLogger(__name__)


def create_saved_folders(saved_model_path: str, 
                         dset_name: str, 
                         save_flag: str,) -> str:
    """Ensure the checkpoint directory for this run exists and return its path.

    The directory is ``saved_model_path/dset_name/save_flag``.  If it already
    exists, the user is asked interactively whether to wipe it (retrain) or
    keep it (resume).

    Args:
        saved_model_path: root directory for all saved models.
        dset_name: dataset name (first sub-directory level).
        save_flag: run/model identifier (second sub-directory level).

    Returns:
        The checkpoint directory path as a string.
    """
    target_dir = osp.join(saved_model_path, dset_name, save_flag)

    # Fresh run: just create the directory tree and return early.
    if not osp.exists(target_dir):
        logger.info(f"Create new model directory '{save_flag}' on dataset {dset_name}")
        os.makedirs(target_dir)
        return target_dir

    # Existing run: let the user decide between retraining and resuming.
    logger.info(f"The model named '{save_flag}' trained on dataset {dset_name} already exists")
    answer = input("Override? Y/N: ")
    if answer.lower() == 'y':
        logger.info(f"Override the trained model '{save_flag}' and retrain")
        shutil.rmtree(target_dir)
        os.makedirs(target_dir)
    else:
        logger.info(f"Resume the trained model '{save_flag}'")

    return target_dir


def acquire_train_mode(ModeEnum, op_codes: List[int]) -> List:
    """Map integer op-codes to members of *ModeEnum*.

    Each op-code is an index into the enum's declaration order.

    Args:
        ModeEnum: the enum class whose members are selected.
        op_codes: indices (0-based, declaration order) of the wanted members.

    Returns:
        List of enum members, one per op-code, in the given order.
    """
    # Materialize the member list once instead of per op-code; the original
    # `ModeEnum[list(ModeEnum)[idx].name]` round-trip was redundant, since
    # `list(ModeEnum)[idx]` already *is* the member.
    members = list(ModeEnum)
    return [members[idx] for idx in op_codes]


def display_performance(perf_dict: dict):
    """Log every metric in *perf_dict*, one line per metric.

    Args:
        perf_dict: mapping of metric name -> metric value
            (presumably floats/strings from an evaluation loop — values
            are logged via their default str() form).
    """
    logger.info("Current Performances (??): ")
    # Implements the former TODO: log the performances of all metrics.
    for metric_name, metric_value in perf_dict.items():
        logger.info(f"{metric_name}: {metric_value}")



def parallelize_model(model: nn.Module, device_ids: Optional[List[int]] = None):
    """Wrap *model* in ``nn.DataParallel`` when more than one GPU is visible.

    Args:
        model: the module to (optionally) parallelize.
        device_ids: subset of GPU ids to use; if None (or empty), all
            visible GPUs are used.  (Annotation fixed to Optional[...] —
            PEP 484 deprecates implicit Optional from a None default.)

    Returns:
        ``nn.DataParallel(model)`` when multiple GPUs are available,
        otherwise the model unchanged.
    """
    device_count = torch.cuda.device_count()
    if device_count > 1:
        if device_ids:
            logger.info(f"Use {len(device_ids)} of {device_count} GPUs")
            model = nn.DataParallel(module=model, device_ids=device_ids)
        else:
            logger.info(f"Use {device_count} of {device_count} GPUs")
            model = nn.DataParallel(model)
    return model


def get_lr_scheduler(lr_policy, optimizer, max_iter=None):
    """Build a learning-rate scheduler from a policy description.

    Args:
        lr_policy: dict with at least a "name" key; the "poly" policy
            additionally requires a "power" key.
        optimizer: torch optimizer whose learning rates are scheduled.
        max_iter: total number of iterations; required (> 0) for "poly".

    Returns:
        A ``torch.optim.lr_scheduler.LambdaLR`` instance.

    Raises:
        ValueError: if ``max_iter`` is missing or non-positive for "poly".
        NotImplementedError: for any unsupported policy name.
    """
    if lr_policy["name"].lower() == "poly":
        # Explicit validation instead of `assert`: asserts are stripped
        # under `python -O`, and `None > 0` would raise a confusing
        # TypeError when max_iter is left at its default.
        if max_iter is None or max_iter <= 0:
            raise ValueError("max iteration must be greater than 0")
        num_groups = len(optimizer.param_groups)

        def lambda_f(cur_iter):
            # Polynomial decay: (1 - t/T) ** power, reaching 0 at t == T.
            return (1 - float(cur_iter) / max_iter) ** lr_policy["power"]

        # LambdaLR expects one lambda per param group.
        scheduler = LambdaLR(optimizer, lr_lambda=[lambda_f] * num_groups)
    else:
        raise NotImplementedError("lr policy not supported")  # typo fixed

    return scheduler
