import argparse
from pathlib import Path
import typing
import os
import re
# https://github.com/uiri/toml
import toml

project_dir = Path(__file__).resolve().parents[0]


# NOTE: a single level of config inheritance is recommended -- two levels add
# complexity for users -- but the implementation supports arbitrary depth.
def load_config(toml_f):
    """Load a config from a toml file, recursively merging parent configs.

    If the parsed file contains a ``parent`` key, the parent file is loaded
    first and the child's values override it.  The ``train`` and ``test``
    tables are merged key-by-key, so a child may override individual entries
    without discarding the rest of the parent's table.

    :param toml_f: path (or file object) accepted by ``toml.load``
    :returns: merged configuration dict

    NOTE(review): the ``parent`` path is resolved by ``toml.load`` relative
    to the current working directory, not to the child file -- confirm that
    callers always run from the project root.
    """
    parsed_toml = toml.load(toml_f)
    if 'parent' not in parsed_toml:
        return parsed_toml  # base case: no inheritance

    merged = load_config(parsed_toml['parent'])
    # Child top-level values override the parent's; 'train'/'test' are
    # merged table-by-table below instead of being replaced wholesale.
    for key, value in parsed_toml.items():
        if key not in ('train', 'test'):
            merged[key] = value
    # Tolerate a missing table on either side (the original raised
    # KeyError when a child or parent omitted 'train' or 'test').
    for section in ('train', 'test'):
        merged.setdefault(section, {}).update(parsed_toml.get(section, {}))
    return merged

#  TODO: add test
def check_dir(train_log_file: Path, test_log_file: Path) -> list:
    """Check whether the output files would clash with existing files.

    For each log file that does not exist yet, the flag is set to 1 and
    the file's parent directory is created (side effect) so a later write
    cannot fail on a missing directory.

    :param train_log_file: path of the training log file
    :param test_log_file: path of the test score file
    :returns: ``[trainable, testable]`` flags -- 1 means the corresponding
        file does not exist yet, so running will not clobber old output
    """
    ret = [0, 0]

    if not train_log_file.exists():
        train_log_file.parent.mkdir(parents=True, exist_ok=True)
        ret[0] = 1

    if not test_log_file.exists():
        test_log_file.parent.mkdir(parents=True, exist_ok=True)
        ret[1] = 1

    return ret


# Map camelCase keys used in the toml files to the snake_case attribute
# names used internally by Config.
shortname = dict(
    numSegments='num_segments',
    trainList='train_list',
    testList='test_list',
    lstmLr='lstm_lr',
    batchSize='batch_size',
    lrStep='lr_steps',
    dataset='data_name',
    dataRoot='data_root',
    lrDecay='lr_decay',
)
class Config():
    """Flatten a parsed toml config into attributes for one run mode.

    Values are applied in order: hard-coded defaults, the ``common`` table,
    then the mode table (``train`` or ``test``); later values override
    earlier ones.  Keys present in ``shortname`` are renamed to their
    snake_case form.  Output paths (weights/scores/log) are derived from
    the mandatory ``NAME`` key, which is consumed in the process.
    """

    def __init__(self, config, mode='train'):
        self.mode = mode
        self.no_accumulation: bool = False
        self.version: str = 'coviar-lstm'
        self._common = config['common']
        self._train = config['train']
        self._test = config['test']
        if mode == 'train':
            self._set_train_attr()
        elif mode == 'test':
            self._set_test_attr()

        # Number of target classes per supported dataset; an unknown
        # data_name raises KeyError here, which surfaces bad configs early.
        self.num_class = {'ucf101': 101, 'hmdb51': 51,
                          'ylimed': 11, 'trecmed': 21}[self.data_name]

    def _apply_table(self, table):
        """Copy one config table onto attributes, expanding shorthand keys."""
        for k, v in table.items():
            setattr(self, shortname.get(k, k), v)

    def _apply_lstm_defaults(self, defaults):
        """Set coviar-lstm defaults without clobbering config-given values.

        BUGFIX: the original assigned these unconditionally *after* the
        config tables were applied, silently overwriting user-supplied
        values (e.g. ``lstmLr`` -> ``lstm_lr``).
        """
        if self.version == 'coviar-lstm':
            for k, v in defaults.items():
                if not hasattr(self, k):
                    setattr(self, k, v)

    def _resolve_paths(self):
        """Derive output locations from ``NAME`` (which is then dropped).

        Returns ``(weights_out, scores_out, train_log_file, test_log_file)``.
        Missing parent directories are created as a side effect of
        ``check_dir``.
        """
        name = self.NAME
        delattr(self, 'NAME')
        output = project_dir.joinpath('out', name)
        weights_out = output.joinpath('weights')
        scores_out = output.joinpath('scores')
        log_out = output.joinpath('log')

        train_log_file = log_out.joinpath(f'{self.representation}_model.trainlog')
        test_log_file = scores_out.joinpath(
            f'{self.data_name}_best_{self.representation}_model__scores.out')
        check_dir(train_log_file, test_log_file)
        return weights_out, scores_out, train_log_file, test_log_file

    def _set_train_attr(self):
        # default values (overridden by the config tables below)
        self.eval_freq: int = 5
        self.num_segments: int = 3
        self.weights: str = ''
        self.workers: int = 8
        self.weight_decay: float = 1e-4
        self.batch_size: int = 40
        self.lr: float = 0.001
        self.epochs: int = 500
        self.lr_steps: list = [200, 300, 400]
        self.lr_decay: float = 0.1
        self.model_prefix: str = 'dev'

        self._apply_table(self._common)
        self._apply_table(self._train)

        self._apply_lstm_defaults({
            'num_layers': 1,
            'freeze': False,
            'lstm_lr': 0.001,
            'dropout': 0,
            'hidden_size': 1024,
        })

        weights_out, _scores, train_log_file, _test_log = self._resolve_paths()
        self.logf = train_log_file
        self.model_prefix = f'{weights_out}/{self.data_name}'

    def _set_test_attr(self):
        # default values (overridden by the config tables below)
        self.num_segments: int = 25
        self.batch_size: int = 1
        self.workers: int = 1
        self.test_crops: int = 10

        self._apply_table(self._common)
        self._apply_table(self._test)

        self._apply_lstm_defaults({
            'hidden_size': 1024,
            'num_layers': 1,
            'dropout': 0,
            'lstm_infeature': '',
            'lstm_outfeature': '',
        })

        weights_out, scores_out, _train_log, test_log_file = self._resolve_paths()
        self.logf = test_log_file

        self.save_scores = f'{scores_out}/{self.data_name}_best_{self.representation}_model__scores'
        self.weights: str = f'{weights_out}/{self.data_name}_{self.representation}_model_best.pth.tar'


from src.train_ng import main
from src.test_ng import main as test
def main2():
    """Parse CLI arguments, load the toml config, then train and/or test.

    ``--config`` is required (the original accepted its absence and then
    crashed inside ``toml.load(None)``).  ``--notrain`` skips training;
    testing always runs.
    """
    parser = argparse.ArgumentParser(description="parse toml config and start train/test")
    parser.add_argument('--config', type=str, required=True,
                        help='toml file for train/test config')
    parser.add_argument('--notrain', action='store_true', help='only test')
    args = parser.parse_args()

    config = load_config(args.config)
    train_cfg = Config(config, 'train')
    test_cfg = Config(config, 'test')

    if not args.notrain:
        main(train_cfg)
    test(test_cfg)


# Script entry point: parse args, then run train/test.
if __name__ == "__main__":
    main2()
