import copy
import os.path

import torch
import yaml
import importlib
from loguru import logger
# Absolute path to the project root: two directory levels above this file.
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

class BaseConfig(object):
    """Lightweight attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    # NOTE: nonstandard dunder name kept for backward compatibility with callers.
    def __update__(self, **kwargs):
        """Add or overwrite attributes from the given keyword arguments."""
        for name, value in kwargs.items():
            setattr(self, name, value)


class Config:
    """Experiment configuration assembled from a YAML file.

    The YAML file is expected to contain three top-level sections:
    ``main`` (flat key/value pairs copied onto this object), ``optimizer``
    and ``model_config``.  The ``main`` section must provide at least
    ``task``, ``dataset``, ``model`` and ``train_max_seq_length``
    (presumably — verify against the YAML files used by callers).
    """

    def __init__(self, yaml_path):
        """Load *yaml_path* and resolve all derived configuration objects.

        Raises KeyError when a mandatory section/key is missing and
        ModuleNotFoundError when the dataset/model modules cannot be found.
        """
        with open(yaml_path, encoding='utf-8') as f:
            yaml_config = yaml.safe_load(f)
        main_config = yaml_config['main']
        for key, value in main_config.items():
            setattr(self, key, value)
        # Default the evaluation sequence length to the training one.
        if "eval_max_seq_length" not in main_config:
            self.eval_max_seq_length = self.train_max_seq_length
        self.parse_optimizer_config(yaml_config['optimizer'])
        self.parse_model_config(yaml_config['model_config'])
        self.dataset_class, self.dataloader_class = self.import_dataset()
        self.model_class = self.import_model()

    def import_dataset(self):
        """Import ``dataset.<task>.<dataset>`` and return its ``get_dataset()`` result.

        The called module is expected to return a (dataset_class,
        dataloader_class) pair, matching the unpacking in ``__init__``.
        """
        dataset_wrapper = importlib.import_module('dataset.{}.{}'.format(self.task, self.dataset))
        return dataset_wrapper.get_dataset()

    def import_model(self):
        """Import ``model_arch.<task>.<model>`` and return its ``get_model()`` result."""
        model_class = importlib.import_module('model_arch.{}.{}'.format(self.task, self.model))
        return model_class.get_model()

    def parse_model_config(self, yaml_model_config):
        """Build ``self.model_config`` (a BaseConfig) from the ``model_config`` section.

        Keys absent from the YAML get the defaults below; the compute device
        is always resolved through ``set_device``.
        """
        mc = BaseConfig(**yaml_model_config)
        # Defaults applied only when the YAML omits the key.
        defaults = {
            'logging_steps': 50,
            'max_grad_norm': 1.0,
            'evaluate_during_training': False,
            'load_mode': None,
        }
        for key, value in defaults.items():
            if key not in yaml_model_config:
                setattr(mc, key, value)
        # A missing 'device' key and an explicit null both auto-select a GPU.
        mc.device = self.set_device(device_id=yaml_model_config.get('device'))
        self.model_config = mc

    def parse_optimizer_config(self, optimizer_config):
        """Build ``self.optimizer_config``: resolve the optimizer class from
        ``torch.optim`` by name and keep its constructor params.
        """
        oc = BaseConfig()
        optimizer_module = importlib.import_module('torch.optim')
        setattr(oc, 'optimizer_class', getattr(optimizer_module, optimizer_config['optimizer_class']))
        setattr(oc, 'params', optimizer_config['params'])
        self.optimizer_config = oc

    @staticmethod
    def set_device(device_id=None):
        """Return the ``torch.device`` to run on.

        Falls back to CPU when CUDA is unavailable.  With an explicit
        *device_id* the corresponding CUDA device is returned; otherwise the
        "best" GPU is auto-selected: the highest-indexed GPU using less than
        200MB, or failing that, the GPU with the most free memory.
        """
        if not torch.cuda.is_available():
            return torch.device('cpu')
        if device_id is not None:
            return torch.device('cuda:{}'.format(device_id))
        # TODO: refine best-GPU selection heuristic.
        import pynvml
        pynvml.nvmlInit()
        try:
            device_count = pynvml.nvmlDeviceGetCount()
            max_free = 0
            best_device_id = 0
            for gpu_id in reversed(range(device_count)):
                handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
                meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
                free = meminfo.free
                used = meminfo.used
                logger.info('GPU {} free: {}MB used: {}MB'.format(gpu_id, free / 1024 / 1024, used / 1024 / 1024))
                # An almost idle GPU (<200MB used) is good enough: take it.
                if used / 1024 / 1024 < 200:
                    best_device_id = gpu_id
                    break
                if free > max_free:
                    max_free = free
                    best_device_id = gpu_id
        finally:
            # Release NVML even if one of the queries above raised.
            pynvml.nvmlShutdown()
        return torch.device('cuda:{:d}'.format(best_device_id))

    def __str__(self):
        """Render the full configuration (including nested sub-configs) as a dict string."""
        config_dict = copy.deepcopy(self.__dict__)
        config_dict['model_config'] = copy.deepcopy(self.model_config.__dict__)
        config_dict['optimizer_config'] = copy.deepcopy(self.optimizer_config.__dict__)
        return str(config_dict)


if __name__ == '__main__':
    # Smoke test: load a sample config and show the parsed model-config type.
    cfg = Config('bertsoftmax_cluner.yaml')
    print(type(cfg.model_config))