#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description:       
@Date     :2022/10/26 14:52:52
@Author      :Charon
@version      :1.0
'''
import copy
import logging
from typing import Dict

from accelerate import Accelerator, load_checkpoint_and_dispatch, DistributedDataParallelKwargs
# The Accelerator is created at import time, before the rest of the project is
# imported, so that distributed state exists for everything below.
# find_unused_parameters=True lets DDP tolerate parameters that receive no
# gradient in a forward pass (at some synchronization cost).
accelerator = Accelerator(kwargs_handlers=[DistributedDataParallelKwargs(find_unused_parameters=True)])
# accelerator = Accelerator()
import os
# Silence the HuggingFace tokenizers fork warning when DataLoader workers fork.
os.environ['TOKENIZERS_PARALLELISM'] = "false"
import sys
# Make the project root (parent of this script's directory) importable so the
# `utils` and `model_utils` packages below resolve when run as a script.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.set_seed import set_seed
import time
import wandb
from utils import get_parser, logger
from model_utils import trainer
from prettytable import PrettyTable
# Module-level table reused for pretty-printing the final test metrics.
Table_Handle = PrettyTable()

# Collect the parsed argument values so they can be logged (e.g. as the
# wandb run config).
def parser_config(config) -> Dict[str, object]:
    """Return a mapping of argument name -> value for *config*.

    Args:
        config: a parsed ``argparse.Namespace`` (any object whose
            attributes are exposed via ``vars``).

    Returns:
        Dict[str, object]: every argument whose value is not ``None``.
        Unlike a plain truthiness filter, this keeps meaningful falsy
        values such as ``False``, ``0`` and ``""`` so they are not
        silently dropped from the logged configuration.
    """
    return {
        arg: value
        for arg, value in vars(config).items()
        if value is not None
    }

if __name__=='__main__':
    # Parse CLI arguments and seed all RNGs for reproducibility.
    parser = get_parser.get_basic_parser_for_other_train()
    config = parser.parse_args()
    set_seed(config.seed)
    # accelerator.print only emits on the main process.
    accelerator.print(accelerator.state)
    # Echo every truthy config value for the run log.
    for arg in vars(config):
        if getattr(config, arg):
            accelerator.print(arg, getattr(config, arg))
    accelerator.print(time.strftime('%Y-%m-%d %H:%M:%S'))
    wandb_run, _logger = None,None
    # File logger and wandb run are created only on the local main process;
    # worker processes keep both as None.
    if accelerator.is_local_main_process:
        if len(config.log_path)>0:
            _logger = logger.Logger(root_path=config.log_path, prefix_fileName='test_dataset')
        wandb_tags = [f'{accelerator.num_processes}GPU', 'weight_loss']

        # Disable wandb entirely when running pure inference.
        wandb_run = wandb.init(project=config.project ,
                         name=config.model_name,
                         config=parser_config(config),
                         tags=wandb_tags,
                         notes='',
                         mode=config.wandb_mode if not config.inference else 'disabled') # "online", "offline" or "disabled". Defaults to online.
        
        
    # NOTE(review): Main is constructed on every process (needed for
    # distributed training), but _logger is None on non-main processes —
    # trainer.Main presumably tolerates that; confirm.
    main = trainer.Main(config=config, _logger=_logger)
    
    if not config.inference:
        # Run training on all processes.
        main()
        # Predict only on the main process.
        if accelerator.is_local_main_process:
            # Under distributed training the test set is sharded across GPUs,
            # so no single process sees the full metric. Re-run prediction on
            # the whole test dataset on a single card to get the overall score.
            result = main.predictor(config.test_path, already=True)
            # Pretty-print the final metrics as a Name/Value table.
            Table_Handle.clear()
            Table_Handle.field_names = ['Names','Values']
            Table_Handle.add_rows(
                [[key, value] for key,value in result.items()]
                )
            print(Table_Handle)
            wandb_run.finish()
            if config.wandb_mode == "online":
                # Reopen the finished run through the public API to patch its
                # summary. Assumes config.id is the wandb run id — TODO confirm.
                api = wandb.Api()
                wandb_run = api.run(f'charon_hn/{config.project}/{config.id}')
                # NOTE(review): assigning into a nested summary dict
                # (summary['TEST'][...] = x) mutates a local copy and may not
                # be persisted by summary.update(); wandb docs recommend
                # replacing the whole top-level key — verify this sticks.
                wandb_run.summary['TEST']['all_f1_score'] = result['f1_score']
                wandb_run.summary['TEST']['all_precision'] = result['precision']
                wandb_run.summary['TEST']['all_recall'] = result['recall']
                # Re-point the wandb summary at the best epoch's result
                # (by default wandb keeps the values from the last step).
                df = wandb_run.history()
                for index in df.index.values:
                    TEST_precision_score = df.at[index,'TEST.precision_score']
                    TEST_recall_score = df.at[index,'TEST.recall_score']
                    TEST_f1_score = df.at[index,'TEST.f1_score']
                    if TEST_f1_score > wandb_run.summary['TEST']['f1_score']:
                        wandb_run.summary['TEST']['f1_score'] = TEST_f1_score
                        wandb_run.summary['TEST']['recall_score'] = TEST_recall_score
                        wandb_run.summary['TEST']['precision_score'] = TEST_precision_score
                wandb_run.summary.update()
