import os
import logging
import time
from pathlib import Path
import config as cfg

import torch
import torch.optim as optim

def create_logger(phase='train'):
    """Set up file + console logging and create the output directories.

    Creates ``<cfg.test_output_dir>/results/<cfg.data_type>`` for the log
    file and ``<cfg.test_output_dir>/tensorboard`` for TensorBoard events.

    Args:
        phase: tag appended to the log file name (e.g. ``'train'``, ``'valid'``).

    Returns:
        tuple: ``(logger, results_dir, tensorboard_dir)`` — the root logger,
        the results directory path (str), and the tensorboard directory
        path (str).
        NOTE(review): this returns the *results* root, not the per-dataset
        subdirectory the log file actually lives in, yet the caller unpacks
        it as ``final_output_dir`` — confirm which one is intended.
    """
    log_dir = Path(cfg.test_output_dir) / 'results'

    # Per-dataset subdirectory that holds the actual log file.
    final_output_dir = log_dir / cfg.data_type
    final_output_dir.mkdir(parents=True, exist_ok=True)

    time_str = time.strftime('%Y-%m-%d-%H-%M')
    final_log_file = final_output_dir / f'{time_str}_{phase}.log'

    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    # Attach handlers explicitly instead of logging.basicConfig():
    # basicConfig silently does nothing once the root logger is configured,
    # so a second call would never open the new log file; it also left the
    # console handler without a formatter and stacked a duplicate console
    # handler on every call.
    file_handler = logging.FileHandler(str(final_log_file))
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    console = logging.StreamHandler()
    console.setFormatter(formatter)
    logger.addHandler(console)

    tensorboard_dir = Path(cfg.test_output_dir) / 'tensorboard'
    tensorboard_dir.mkdir(parents=True, exist_ok=True)

    return logger, str(log_dir), str(tensorboard_dir)

def get_optimizer(cfg, model):
    """Build the optimizer selected by ``cfg.TRAIN.OPTIMIZER``.

    Args:
        cfg: config object exposing ``TRAIN.OPTIMIZER`` ('sgd' or 'adam')
            plus the hyperparameters each optimizer reads.
        model: module whose parameters are handed to the optimizer.

    Returns:
        A configured ``torch.optim`` optimizer, or ``None`` when the
        configured name matches neither supported choice.
    """
    choice = cfg.TRAIN.OPTIMIZER

    if choice == 'sgd':
        return optim.SGD(
            model.parameters(),
            lr=cfg.TRAIN.LR,
            momentum=cfg.TRAIN.MOMENTUM,
            weight_decay=cfg.TRAIN.WD,
            nesterov=cfg.TRAIN.NESTEROV,
        )

    if choice == 'adam':
        return optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)

    return None


def save_checkpoint(states, is_best, output_dir,
                    filename='checkpoint.pth.tar'):
    """Serialize ``states`` into ``output_dir``.

    Writes the full ``states`` object to ``<output_dir>/<filename>``; when
    ``is_best`` is true and ``states`` carries a ``'state_dict'`` entry,
    that entry is additionally saved as ``model_best.pth.tar``.
    """
    checkpoint_path = os.path.join(output_dir, filename)
    torch.save(states, checkpoint_path)

    if is_best and 'state_dict' in states:
        best_path = os.path.join(output_dir, 'model_best.pth.tar')
        torch.save(states['state_dict'], best_path)

# Smoke test: build the 'valid'-phase logger (creates the output directories
# configured in the project `config` module) and print a marker on success.
if __name__ == '__main__':
    logger, final_output_dir, tb_log_dir = create_logger('valid')
    print(1)
