# encoding: utf-8
import ast
import json
import os
import re
import shlex
import shutil
import subprocess

from ...config.base import ORIGIN_MODEL_PATH, ORIGIN_MODEL_NAME, \
    MODEL_SAVE_PATH, MODEL_TRAIN_PATH, MODEL_TRAIN_DATA_PATH, create_file
from ...tools.tool import rm_file

DEFAULT_RUN_NAME = 'debug'

# Default hyper-parameters for a fine-tuning run.  The keys double as the
# environment-variable names exported to scripts/finetune.sh by run_train(),
# and 'lr' / 'batch_size' also act as upper bounds in switch_params().
DEFAULT_TRAIN_CONFIG = {
    'data_path': '/app/contract/data/train_data',
    'metrics': 'span',
    'lr': 3e-5,           # learning rate; user-supplied values are clamped to this maximum
    'batch_size': 8,      # batch size; user-supplied values are clamped to this maximum
    'grad_acc': 1,        # gradient accumulation steps
    'bert_model_dir': ORIGIN_MODEL_PATH,  # base pretrained model directory
    'logging_steps': 100,
    'epochs': 10,
    'lr_type': 'linear',  # learning-rate scheduler type
    'nproc': 1,           # number of processes for training
    'load_checkpoint': '../',
    'run_name': DEFAULT_RUN_NAME,
    'output_dir': MODEL_TRAIN_PATH,  # where training output/checkpoints are written
}


def switch_params(provide_params):
    """Translate externally supplied training params onto internal config keys.

    The learning rate and batch size are clamped so they never exceed the
    values in DEFAULT_TRAIN_CONFIG; missing entries fall back to the defaults.

    :param provide_params: dict with optional keys 'lr',
        'lr_scheduler_type', 'batch_size_per_gpu'
    :return: dict with internal keys 'lr', 'lr_type', 'batch_size'
    """
    defaults = DEFAULT_TRAIN_CONFIG
    requested_lr = float(provide_params.get('lr', defaults['lr']))
    requested_bs = int(provide_params.get('batch_size_per_gpu', defaults['batch_size']))
    return {
        'lr': min(requested_lr, defaults['lr']),
        'lr_type': provide_params.get('lr_scheduler_type', defaults['lr_type']),
        'batch_size': min(requested_bs, defaults['batch_size']),
    }


def run_sh(command):
    """Run *command* through the shell and print its combined output.

    NOTE(review): command is executed with shell=True, so callers must never
    pass untrusted input into it.

    :param command: shell command string
    """
    try:
        # Capture stderr together with stdout so a failing command's
        # diagnostics are available on the exception object.
        # Execute the shell command and collect its output.
        result = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)

        print("命令执行成功！")
        print("输出结果为:\n", result.decode())
    except subprocess.CalledProcessError as e:
        # Bug fix: check_output never populates e.stderr unless stderr is
        # explicitly captured, so the old code printed None here.  e.output
        # holds whatever the command emitted before failing.
        print("命令执行失败！错误信息为:\n", e.output.decode() if e.output else '')


def run_sh_with_real_time(command, log_cache):
    """Run *command* through the shell, echoing its output line by line.

    Every line is printed, appended to *log_cache*, and scanned for the first
    metrics dict that appears in the stream.

    :param command: shell command string (executed with shell=True)
    :param log_cache: object exposing add_log_line(str)
    :return: (return_code, metric_span) — metric_span is the parsed metrics
             dict, or None if no metrics line was seen
    """
    span_metrics = None
    proc = subprocess.Popen(
        command,
        shell=True,
        text=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # interleave stderr into the same stream
    )
    while True:
        line = proc.stdout.readline()
        # readline() returns '' only at EOF; stop once the process has exited.
        if line == '' and proc.poll() is not None:
            break
        if not line:
            continue
        text = line.strip()
        # Only the first metrics line encountered is kept.
        if span_metrics is None:
            span_metrics = analyze_metric_from_log(text)
        print(text)
        log_cache.add_log_line(text)
    # Wait for the child to finish and collect its exit status.
    return proc.wait(), span_metrics


def analyze_metric_from_log(log_text):
    """Extract the evaluation-metrics dict from a single training log line.

    Metric lines contain 'test_precision' followed by a dict literal, which
    may be JSON or a Python repr (single-quoted keys).

    :param log_text: one stripped line of training output
    :return: parsed metrics dict, or None if the line carries no parsable metrics
    """
    if 'test_precision' not in log_text:
        return None
    match = re.search(r'\{.*\}', log_text)
    if match is None:
        return None
    payload = match.group()
    try:
        # Fast path: the payload is already valid JSON.
        return json.loads(payload)
    except json.JSONDecodeError:
        pass
    try:
        # Fallback for Python-repr dicts.  Safer than the old blanket
        # single->double quote replacement, which corrupted any payload whose
        # string values themselves contained quotes.
        return ast.literal_eval(payload)
    except (ValueError, SyntaxError):
        # Malformed payload: report "no metrics" instead of raising.
        return None


def test():
    """Manual smoke test: list the models directory, then the current one."""
    for cmd in ('cd /app/contract/models && ls', 'ls'):
        subprocess.run(cmd, shell=True)


def run_train(model_params: dict, log_cache):
    """Launch a fine-tuning run, streaming its log output in real time.

    Each entry of *model_params* is exported as an environment variable and
    then scripts/finetune.sh is invoked from the rex directory.

    :param model_params: mapping of env-var name -> value consumed by
        finetune.sh (see DEFAULT_TRAIN_CONFIG for the expected keys)
    :param log_cache: log sink passed through to run_sh_with_real_time
    :return: (return_code, metric_span) from the training process
    """
    path_command = 'cd /app/contract/models/nlp_deberta_rex-uninlu_chinese-base/rex'
    # shlex.quote guards against values containing spaces or shell
    # metacharacters breaking (or injecting into) the composed command line.
    config_commands = [
        'export %s=%s' % (key, shlex.quote(str(value)))
        for key, value in model_params.items()
    ]
    run_commands = 'bash scripts/finetune.sh'

    actual_command = ' && '.join([path_command] + config_commands + [run_commands])
    # Clear the previous run's output so stale checkpoints are not reused.
    rm_file(MODEL_TRAIN_PATH)
    return_code, metric_span = run_sh_with_real_time(actual_command, log_cache)
    return return_code, metric_span


def save_model(save_model_name, run_name):
    """Publish a finished training run as a named saved model.

    Copies the origin model's auxiliary files (excluding weights and the rex
    sources) into MODEL_SAVE_PATH/<save_model_name>, then overlays the newly
    trained pytorch_model.bin from the run's output directory.

    :param save_model_name: directory name for the saved model
    :param run_name: training run whose weights should be published
    """
    # os.path.join instead of fragile '//' string concatenation.
    new_model_path = os.path.join(MODEL_SAVE_PATH, save_model_name)

    # Start from a clean slate so files from a prior save don't linger.
    rm_file(new_model_path)

    # Copy the origin model's config files; skip the origin weights and rex.
    copy_origin_model_config = "rsync -av --exclude='pytorch_model.bin' --exclude='rex' %s %s" % (
        ORIGIN_MODEL_PATH, new_model_path)
    run_sh(copy_origin_model_config)

    # Overlay the freshly trained weights.
    trained_weights = os.path.join(MODEL_TRAIN_PATH, run_name, 'pytorch_model.bin')
    copy_new_model = 'cp -r %s %s' % (trained_weights, os.path.join(new_model_path, ORIGIN_MODEL_NAME))
    run_sh(copy_new_model)


# 获取已有模型地址
def get_save_model_path(save_model_name):
    new_model_path = MODEL_SAVE_PATH + '//' + save_model_name + '//' + ORIGIN_MODEL_NAME
    # create_file(new_model_path)
    return new_model_path


# 获取训练数据的存储地址
def get_train_data_path(save_model_name):
    train_data_path = MODEL_TRAIN_DATA_PATH + '//' + save_model_name
    create_file(train_data_path)
    return train_data_path


# 删除训练数据文件
def rm_train_data(data_name):
    if data_name is not None:
        delete_data_path = MODEL_TRAIN_DATA_PATH + '//' + data_name
        rm_file(delete_data_path)

    else:
        for filename in os.listdir(MODEL_TRAIN_DATA_PATH):
            file_path = os.path.join(MODEL_TRAIN_DATA_PATH, filename)
            # 判断是否是文件
            if os.path.isfile(file_path):
                # 删除文件
                os.remove(file_path)

            elif os.path.isdir(file_path):
                # 如果有子文件夹，你可以选择递归删除或者保留
                rm_file(file_path)


if __name__ == '__main__':
    # test()

    class _NullLogCache:
        # Minimal log sink so run_train can be exercised from the CLI.
        def add_log_line(self, line):
            pass

    # Bug fix: run_train requires a log_cache argument; the previous call
    # run_train(DEFAULT_TRAIN_CONFIG) raised TypeError before training began.
    run_train(DEFAULT_TRAIN_CONFIG, _NullLogCache())
    # save_model('model_test')