# 执行测试相关接口
import os
import psutil
import subprocess
import threading
import re
import socket
from datetime import datetime
import time, yaml, json, paramiko
from itertools import cycle
from flask import Blueprint, request, send_file,session
from Logger import logger, logger_config
import xml.etree.ElementTree as ET
from utils import MyResponse, get_global_config, get_local_ip, handle_exception, handle_success, \
    compress_directory_to_zip, to_lower_camel_case
from utilsNetwork import execute_command_linux, check_jmeter_start
from db_handler import db_handler
from concurrent.futures import ThreadPoolExecutor
from configparser import ConfigParser

# Flask blueprint exposing the test-execution endpoints; registered by the application elsewhere.
project = Blueprint('project', __name__)

# 跨平台路径处理函数
def unix_style_path(path):
    """Normalise a path string to forward-slash (Unix) separators."""
    return '/'.join(path.split('\\'))

def safe_join(*args):
    """Join and normalise path components, cross-platform.

    When the optional ``[paths] unix_style_path`` flag in the global
    configuration is enabled (defaults to off), the result is converted
    to forward-slash separators.
    """
    joined = os.path.normpath(os.path.join(*args))
    wants_unix = get_global_config().getboolean('paths', 'unix_style_path', fallback=False)
    return unix_style_path(joined) if wants_unix else joined

def update_json_file(json_file_path, ip, is_select):
    """Set the ``is_select`` flag of the entry matching *ip* in a machine-info JSON file.

    The file is rewritten only when a matching entry exists; otherwise a
    warning is logged. Any I/O or JSON error is logged and re-raised.
    """
    json_file_path = safe_join(json_file_path)
    try:
        with open(json_file_path, 'r', encoding='utf-8') as fh:
            machines = json.load(fh)

        # Locate the first (and assumed only) entry for this IP.
        target = next((m for m in machines if m["ip"] == ip), None)
        if target is not None:
            target["is_select"] = is_select
            with open(json_file_path, 'w', encoding='utf-8') as fh:
                json.dump(machines, fh, indent=4, ensure_ascii=False)
            logger.info(f"成功更新 {ip} 的状态")
        else:
            logger.warning(f"未找到IP {ip} 的配置项")

    except Exception as e:
        logger.error(f"更新JSON文件失败: {e}")
        raise


def get_jtl_list(jtl_path, report_info_path):
    """Build the report index JSON listing the raw .jtl result files in *jtl_path*.

    Scans *jtl_path* for files ending in ``.jtl`` while excluding the derived
    ``*kpi.jtl`` and ``*error.jtl`` outputs, and writes one entry per file:
    ``{"jtlName": <basename without extension>, "reportIsExist": "False"}``.
    If *jtl_path* does not exist or holds no matching files, an empty JSON
    list is written so the index file is always present and valid.

    Args:
        jtl_path: directory holding the jtl result files (may not exist).
        report_info_path: destination path of the ReportInfo.json index.
    """
    jtl_data = []
    if os.path.isdir(jtl_path):
        for file_name in os.listdir(jtl_path):
            # Keep raw result files only; kpi/error jtl are derived outputs.
            if file_name.endswith('.jtl') and not (
                    file_name.endswith('kpi.jtl') or file_name.endswith('error.jtl')):
                jtl_data.append({
                    # Strip only the trailing extension (the original used
                    # str.replace, which also removed interior '.jtl' runs).
                    "jtlName": file_name[:-len('.jtl')],
                    "reportIsExist": "False",  # report not generated yet
                })
    # Single write with an explicit encoding. The original wrote the file
    # twice (first "[]", then the real content) and used the platform default
    # encoding together with ensure_ascii=False, which can raise
    # UnicodeEncodeError on non-UTF-8 locales (e.g. Windows GBK).
    with open(report_info_path, 'w', encoding='utf-8') as json_file:
        json.dump(jtl_data, json_file, indent=4, ensure_ascii=False)

# In-memory registry of currently running projects, keyed by project name.
# Each value holds: "running" flag, a threading.Lock, "start_time",
# the per-project logger and the active subprocess.Popen (or None).
# Entries are removed when the run finishes, times out, or is stopped.
project_status = {}

def run(data, lock_user,project_id,task_run_id):
    """Worker-thread entry point that executes a project's load test.

    Removes the previous jtl directory, runs the bzt test plan, generates the
    HTML report via RunToHtml.py, and streams all subprocess output into the
    per-project log. A watchdog timer kills the whole process tree if the run
    exceeds the configured 'runTimeout'. On exit (success, failure or
    exception) the taskrun status is updated, the slave machines locked by
    *lock_user* are released and the jtl report index is rebuilt.

    Args:
        data: dict with "projectName" and "project_log" entries.
        lock_user: login name that locked the slave machines; used to release them.
        project_id: project primary key, forwarded to RunToHtml.py.
        task_run_id: taskrun primary key whose status is updated.
    """
    project_name = data["projectName"]
    PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # repository root
    # NOTE(review): Windows-only separator ('\s' is also an invalid escape
    # sequence); only referenced by the commented-out JSON update below.
    json_file_path = os.path.join(PROJECT_ROOT, 'Solo\static\machineInfo.json')
    root_path = get_global_config()['rootPath']
    jmeter_path = get_global_config()['jmeterPath']
    yaml_file = os.path.join(root_path, data["projectName"], data["projectName"])+ ".yaml"
    yaml_filenew = os.path.join(root_path, data["projectName"], data["projectName"]) + "_run.yaml"
    jtlpath = os.path.join(root_path, data["projectName"], 'jtl')
    project_logger = logger_config(log_path=data["project_log"], logging_name=data["projectName"])

    # Cross-platform absolute path to the report-generator script.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    runtohtml_path = os.path.normpath(os.path.join(current_dir, 'RunToHtml.py'))

    # Fail fast if the report generator is missing.
    if not os.path.exists(runtohtml_path):
        project_logger.error(f"RunTohtml.py路径不存在: {runtohtml_path}")
        raise FileNotFoundError(f"RunTohtml.py not found at {runtohtml_path}")

    # commands = ["rd /s /q " + jtlpath, "bzt -q " + yaml_filenew,
    #             "python  " + runtohtml_path + ' ' + jmeter_path + ' ' + jtlpath + ' ' + yaml_file+ ' ' + str(project_id)+ ' ' + str(task_run_id)]
    # commands = ["ping www.baidu.com", "ping cn.bing.com", "ipconfig"]

    # Build OS-specific shell commands: clean jtl dir, run bzt, build report.
    if os.name == 'nt':  # Windows
        commands = [
            f"rd /s /q {jtlpath}" if os.name == 'nt' else f"rm -rf {jtlpath}",  # NOTE(review): redundant ternary — os.name is already 'nt' in this branch
            f"bzt -q {yaml_filenew}",
            f"python {runtohtml_path} {jmeter_path} {jtlpath} {yaml_file} {project_id} {task_run_id}"
        ]
    else:  # Linux
        commands = [
            f"rm -rf {jtlpath}",
            f"/usr/local/python37/bin/bzt -q {yaml_filenew}",
            f"python3 {runtohtml_path} {jmeter_path} {jtlpath} {yaml_file} {project_id} {task_run_id}"
        ]
    print("CKJ"+jtlpath)
    # Register the run so /file/checkRun and /file/stopProject can see and control it.
    lock = threading.Lock()
    project_status[project_name] = {"running": True, "lock": lock, "start_time": time.time(),
                                    "project_logger": project_logger, "process": None}

    # Watchdog: kill the whole process tree once the run exceeds the timeout.
    def timeout_handler():
        if project_name in project_status:
            project_info = project_status[project_name]
            process = project_info["process"]
            if process and process.poll() is None:
                project_logger.warning("测试执行超时，正在强制终止进程及其子进程...")
                try:
                    parent = psutil.Process(process.pid)
                    children = parent.children(recursive=True)
                    for child in children:
                        project_logger.info(f"终止子进程 PID: {child.pid}")
                        child.terminate()
                    # Give children 5s to exit gracefully, then SIGKILL survivors.
                    _, still_alive = psutil.wait_procs(children, timeout=5)
                    for child in still_alive:
                        project_logger.info(f"强制杀死子进程 PID: {child.pid}")
                        child.kill()
                    project_logger.info("所有子进程已被终止。")

                    parent.terminate()
                    try:
                        parent.wait(5)
                    except psutil.TimeoutExpired:
                        project_logger.info(f"强制杀死父进程 PID: {parent.pid}")
                        parent.kill()
                except psutil.NoSuchProcess:
                    project_logger.warning("进程已不存在。")
                except Exception as e:
                    project_logger.error(f"终止进程时发生错误: {e}")

                # Mark the taskrun as timed out.
                update_status_sql = 'UPDATE taskrun SET status = %s WHERE id = %s'
                db_handler.execute_update(update_status_sql, ('timeout', task_run_id))
                # Release the slave machines locked by this user.
                update_sql = 'UPDATE slaver SET lock_user= %s, is_select= %s WHERE lock_user=%s'
                db_handler.execute_update(update_sql, ('', 0, lock_user))
                project_logger.info("释放测试机并更新状态。")
                # Drop the registry entry (the finally-block below re-checks before deleting).
                del project_status[project_name]

    # Timeout in seconds, read from the global config key 'runTimeout'.
    run_timeout = int(get_global_config()['runTimeout'])
    project_logger.info(f"操作人: {lock_user} - 测试执行超时时间为 {run_timeout} 秒。")
    timer = threading.Timer(run_timeout, timeout_handler)

    # Serialise the whole run under this project's lock.
    with lock:
        try:
            timer.start()  # start the watchdog
            for cmd in commands:
                project_logger.info(cmd)
                # shell=True is required: commands are shell strings (rd/rm, bzt).
                project_status[project_name]["process"] = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                                                           stderr=subprocess.STDOUT,
                                                                           bufsize=1)
                # Stream combined stdout/stderr into the project log until EOF.
                for line in iter(project_status[project_name]["process"].stdout.readline, b''):
                    project_logger.info(line.decode('utf-8', errors='ignore'))
            # NOTE(review): only the LAST command's process is closed/killed/waited
            # here; earlier commands are considered finished once their stdout hits EOF.
            project_status[project_name]["process"].stdout.close()
            project_status[project_name]["process"].kill()
            return_code = project_status[project_name]["process"].wait()
            logger.info("-------------------done----------------- 返回码: %s", return_code)
            task_status = 'completed' if return_code == 0 else 'failed'
            logger.info("task status has change---completed")
        except Exception as e:
            logger.error(f"Error while running project {project_name}: {e}")
            task_status = 'failed'
        finally:
            # Stop the watchdog.
            try:
                timer.cancel()
                logger.info(f"操作人{lock_user}：定时器已取消")
            except Exception as e:
                logger.error(f"操作人{lock_user}：取消定时器失败: {e}")
            
            # Remove the registry entry (may already be gone after a timeout).
            try:
                if project_name in project_status:
                    del project_status[project_name]
                    logger.info(f"操作人{lock_user}：已删除项目状态 {project_name}")
                else:
                    logger.warning(f"操作人{lock_user}：项目状态字典中未找到项目名: {project_name}")
            except Exception as e:
                logger.error(f"操作人{lock_user}：删除项目状态失败: {e}")
            
            # Persist the final task status.
            try:
                update_status_sql = 'UPDATE taskrun SET status = %s WHERE id = %s'
                db_handler.execute_update(update_status_sql, (task_status, task_run_id))
                logger.info(f"操作人{lock_user}：更新任务状态为 {task_status} 成功")
            except Exception as e:
                logger.error(f"操作人{lock_user}：更新任务状态失败: {e}")
            
            # Release the slave machines locked by this user.
            try:
                update_sql = 'UPDATE slaver SET lock_user= %s, is_select= %s WHERE lock_user=%s'
                db_handler.execute_update(update_sql, ('', 0, lock_user))
                logger.info(f"操作人{lock_user}：释放测试机成功")
            except Exception as e:
                logger.error(f"操作人{lock_user}：释放测试机失败: {e}")

            # Rebuild the jtl report index consumed by the report page.
            report_info_path = os.path.join(root_path, data["projectName"], 'ReportInfo.json')
            get_jtl_list(jtlpath, report_info_path)

def get_username_password(target_ip, data):
    """Return (username, password) of the first entry whose ip occurs in *target_ip*.

    Membership uses containment semantics: each item's 'ip' is tested with
    ``in`` against *target_ip* (a list of IPs or a string). Returns
    (None, None) when nothing matches.
    """
    match = next((entry for entry in data if entry['ip'] in target_ip), None)
    if match is None:
        return None, None
    return match['username'], match['password']


# 执行测试
# 执行测试
@project.route('/file/runProject', methods=['POST'])
def run_project():
    """Start a load test for a project.

    Validates the global root path and the requested slave machines, locks the
    slaves, records a 'running' row in taskrun, then launches run() in a
    background thread. Returns immediately; progress is polled via /file/checkRun.
    """
    try:
        params = request.json
        user_id=session['userid']
        project_name = params["projectName"]
        root_path = get_global_config()['rootPath']
        target_ip = params["distributedValue"]
        scenario_names = params.get("scenarioNames", [])
        lock_user = session['loginname'] # current operator
        PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # repository root
        # NOTE(review): Windows-only separator; only referenced by the commented-out code below.
        json_file_path = os.path.join(PROJECT_ROOT, 'Solo\static\machineInfo.json')
        print(project_name, project_status, project_name in project_status)

        if not os.path.exists(root_path):
            return handle_exception("根路径错误", "全局配置错误，请联系管理员进行配置")

        # Reject the request if any chosen slave is disabled or already locked.
        if target_ip:
            format_strings = ','.join(['%s'] * len(target_ip))
            check_sql = f"SELECT ip, status, is_select FROM Slaver WHERE ip IN ({format_strings})"
            slaver_status = db_handler.execute_query(check_sql, tuple(target_ip))
            for slaver in slaver_status:
                if slaver['status'] != 1:
                    return handle_exception("error", f"负载机 {slaver['ip']} 未启用")
                if slaver['is_select'] == 1:
                    return handle_exception("error", f"负载机 {slaver['ip']} 已被占用")

        yaml_file = os.path.join(root_path, project_name, project_name) + ".yaml"
        if os.path.isfile(yaml_file):  # the project's yaml test plan must exist
            # Refuse to start a second concurrent run of the same project.
            if project_name in project_status:
                return handle_exception("error", "该项目正在执行测试任务，请等待...")
            else:
                project_log = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs",
                                           project_name + ".log")
                if os.path.exists(project_log):  # truncate the previous run's log
                    with open(project_log, "a+") as file:
                        file.truncate(0)
                data = {"project_log": project_log, "projectName": project_name}

                # Re-check occupancy right before locking.
                # NOTE(review): not atomic — a small race window remains between
                # this SELECT and the UPDATE below.
                if target_ip:
                    format_strings = ','.join(['%s'] * len(target_ip))
                    check_sql = f"SELECT ip FROM Slaver WHERE ip IN ({format_strings}) AND is_select = 1"
                    occupied_ips = db_handler.execute_query(check_sql, tuple(target_ip))
                    if occupied_ips:
                        occupied_ip_list = [row['ip'] for row in occupied_ips]
                        occupied_ips_str = ', '.join(occupied_ip_list)
                        return handle_exception(
                            {"msg": f"测试机 {occupied_ips_str} 已被占用"},
                            f"测试机 {occupied_ips_str} 已被占用，请选择其他机器"
                        )

                # Lock the chosen slaves (is_select = 1) under this operator's name.
                if target_ip:
                    update_sql = f"UPDATE Slaver SET lock_user = %s, is_select = %s WHERE ip IN ({','.join(['%s'] * len(target_ip))})"
                    update_params = [lock_user, 1] + target_ip
                    db_handler.execute_update(update_sql, tuple(update_params))
                # if target_ip:  # check that target_ip is not empty
                #     for ip in target_ip:
                #         update_json_file(json_file_path, ip, True)

                # Insert the taskrun record for this run.
                project_id=db_handler.execute_query("SELECT id FROM project WHERE project_name = %s",(project_name,))[0]["id"]
                slaver_ip=", ".join(f"'{item}'" for item in target_ip)
                run_scenarios=", ".join(f"'{item}'" for item in scenario_names)
                run_user=session["loginname"]
                run_ip=session["client_ip"]
                run_time=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
                insert_sql = "INSERT INTO taskrun (project_id, slaver_ip, run_ip, run_user, run_time, run_scenarios, status) VALUES (%s, %s, %s, %s, %s, %s, %s)"
                insert_value = (project_id, slaver_ip, run_ip, run_user, run_time, run_scenarios, 'running')
                db_handler.insert(insert_sql,insert_value)

                # Fetch the id of the row just inserted.
                # NOTE(review): MAX(id) assumes no concurrent inserts for this
                # project; prefer the insert's lastrowid if the handler exposes it.
                task_run_id = db_handler.execute_query("SELECT MAX(id) AS id from taskrun where project_id = %s ",(project_id,))[0]["id"]
                single_t = threading.Thread(target=run, args=(data, lock_user,project_id,task_run_id))  # run the command pipeline on a background thread
                single_t.start()

        else:
            return handle_exception("error", "yaml文件不存在")
    except Exception as err:  # catch-all: report as a generic server error
        return handle_exception(err, "程序异常")
    else:
        return handle_success("success", "测试成功运行")


# 检查测试状态
@project.route('/file/checkRun', methods=['POST'])
def check_run():
    """Report a project's run state: running flag, elapsed seconds and log text."""
    try:
        body = request.json
        snapshot = time.time()
        elapsed = None
        running = False
        name = body["projectName"]
        log_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs",
                                name + ".log")
        if os.path.exists(log_file):  # a log file exists once a run has started
            with open(log_file, 'r', encoding="utf-8") as fh:
                print("打开文件")
                # Drop blank lines, keep everything else verbatim.
                content = ''.join(ln for ln in fh.readlines() if ln.strip())
                print(content)
        else:
            content = "等待测试执行。。。"

        # Pull live state from the in-memory registry, if this project is running.
        status = project_status.get(name)
        if status is not None:
            running = status.get("running", False)
            elapsed = round(snapshot - status.get("start_time", 0))
    except Exception as err:
        return handle_exception(err, "程序异常")
    else:
        return handle_success({"isRunning": running, "content": content, "count": elapsed}, "success")

@project.route('/file/stopProject', methods=['POST'])
def stop_project():
    """Abort a running test: delete temporary modified jmx plans, release the
    slave machines, mark the taskrun 'stopped' and terminate the run's
    subprocess tree.

    NOTE(review): when the project is absent from project_status the endpoint
    still returns success after only deleting the modified_*.jmx files.
    """
    try:
        params = request.json
        project_name = params["projectName"]
        target_ip = params["distributedValue"]
        current_user = session['loginname']  # current operator
        PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # repository root
        # NOTE(review): Windows-only separator; only referenced by the commented-out code below.
        json_file_path = os.path.join(PROJECT_ROOT, 'Solo\static\machineInfo.json')

        logger.info(f"用户：{current_user} 终止项目测试: {project_name}")
        rootPath=get_global_config()['rootPath']
        testplans_dir = os.path.join(rootPath, project_name, 'testplans')

        # Remove the temporary 'modified_*.jmx' plans generated for this run.
        for file_name in os.listdir(testplans_dir):
            if file_name.startswith('modified_') and file_name.endswith('.jmx'):
                file_path = os.path.join(testplans_dir, file_name)
                os.remove(file_path)
                logger.info(f"删除文件: {file_path}")

        if project_name in project_status:
            project_info = project_status[project_name]
            process = project_info["process"]

            # Look up the running taskrun row and the user who locked the slaves.
            project_id = db_handler.execute_query("SELECT id FROM project WHERE project_name = %s", (project_name,))[0][
                "id"]
            lock_user = \
                db_handler.execute_query("SELECT run_user FROM taskrun WHERE project_id = %s AND status = 'running'",
                                         (project_id,))[0]["run_user"]
            task_run_id = \
                db_handler.execute_query("SELECT id FROM taskrun WHERE project_id = %s AND status = 'running'",
                                         (project_id,))[0]["id"]

            # Release the slave machines (is_select back to 0).
            # if target_ip:  # check that target_ip is not empty
            #     for ip in target_ip:
            #         update_json_file(json_file_path, ip, False)
            update_sql = 'UPDATE slaver SET lock_user= %s,is_select= %s  WHERE lock_user=%s'
            db_handler.execute_update(update_sql, ('', 0, lock_user))

            # Mark the taskrun as stopped.
            update_status_sql = 'UPDATE taskrun SET status = %s WHERE id = %s'
            db_handler.execute_update(update_status_sql, ('stopped', task_run_id))

            if process is not None:
                try:
                    # Terminate the children first, then the parent.
                    parent = psutil.Process(process.pid)
                    for child in parent.children(recursive=True):
                        logger.info(f"操作人{current_user}：Terminating child process {child.pid} of project {project_name}")
                        child.terminate()
                    parent.terminate()

                    # Give children 3s to exit, then SIGKILL the survivors.
                    gone, still_alive = psutil.wait_procs(parent.children(recursive=True), timeout=3)
                    for p in still_alive:
                        logger.info(f"操作人{current_user}：Killing child process {p.pid} of project {project_name}")
                        p.kill()
                    parent.kill()

                    # Make sure the parent itself is gone as well.
                    gone, still_alive = psutil.wait_procs([parent], timeout=3)
                    if still_alive:
                        for p in still_alive:
                            logger.info(f"操作人{current_user}：Killing main process {p.pid} of project {project_name}")
                            p.kill()

                except psutil.NoSuchProcess as e:
                    logger.warning(f"操作人{current_user}：No such process: {e}")
                except Exception as e:
                    logger.error(f"操作人{current_user}：Error while stopping project {project_name}: {e}")
                finally:
                    # Drop the in-memory registry entry for this project.
                    project_info["process"] = None
                    del project_status[project_name]
                    # if target_ip:

    except Exception as err:
        return handle_exception(str(err), "程序异常")
    else:
        return handle_success("success", "进程已停止")

def check_port(ip, port=4444):
    """Probe a TCP port (default 4444, the PerfMon agentServer port).

    Returns (ip, True) when a connection succeeds within 2 seconds;
    (ip, False) on refusal, timeout or any socket error.
    """
    try:
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            probe.settimeout(2)
            reachable = probe.connect_ex((ip, port)) == 0
        finally:
            probe.close()
        return ip, reachable
    except Exception:
        return ip, False

def process_scenario(scenario_name, scenario_details, root_path, project_name, log_messages, is_open, pattern, ip_pattern):
    """Validate one scenario's jmx for PerfMon resource monitoring.

    Checks that the jmx file exists, contains a PerfMon Metrics Collector with
    both Res_CPU and Res_Memory listeners, and that every agentServer IP found
    inside the collector block answers on port 4444. When everything checks
    out, the scenario's modifications.disable list is cleared so monitoring
    stays enabled. All findings are appended to *log_messages*; no return value.
    """
    file_name = f"{scenario_name}.jmx"
    # Normalise to forward slashes so the path works on both platforms.
    jmx_path = os.path.normpath(
        os.path.join(root_path, project_name, "testplans", file_name)
    ).replace("\\", "/")

    if not os.path.exists(jmx_path):
        log_messages.append(f"ERROR: {file_name} 不存在")
        return
    try:
        collectors = ET.parse(jmx_path).getroot().findall(
            './/kg.apc.jmeter.perfmon.PerfMonCollector')
        if not collectors:
            log_messages.append(f"ERROR: {file_name} 开启资源监控失败，脚本中缺少 PerfMon Metrics Collector 插件")
            return

        listener_names = {c.get('testname') for c in collectors}
        cpu_present = 'Res_CPU' in listener_names
        mem_present = 'Res_Memory' in listener_names

        if not (cpu_present and mem_present):
            if not cpu_present:
                log_messages.append(f"ERROR: {file_name} 开启资源监控失败，脚本中缺少 Res_CPU")
            if not mem_present:
                log_messages.append(f"ERROR: {file_name} 开启资源监控失败，脚本中缺少 Res_Memory")
            return

        with open(jmx_path, 'r', encoding='utf-8') as fh:
            jmx_text = fh.read()

        section = pattern.search(jmx_text)
        if section is None:
            log_messages.append(f"ERROR: 未找到 {file_name} 中的 <collectionProp name=\"metricConnections\"> 标签")
            return

        # Probe every distinct agentServer host found inside the collector block.
        hosts = set(ip_pattern.findall(section.group(1)))
        with ThreadPoolExecutor(max_workers=10) as pool:
            probe_results = list(pool.map(check_port, hosts))

        every_agent_up = True
        for host, reachable in probe_results:
            if reachable:
                log_messages.append(f"检测到 {file_name} 中设置的待监控服务器 {host} 的 agentServer 已开启，{file_name} 成功开启资源监控")
            else:
                log_messages.append(f"WARNING: 检测到 {file_name} 中设置的待监控服务器【{host}】的 agentServer 未开启，{file_name} 开启资源监控失败！")
                every_agent_up = False

        if every_agent_up:
            # Clearing the disable list keeps Res_CPU/Res_Memory active.
            scenario_details.setdefault('modifications', {})['disable'] = []
    except ET.ParseError as e:
        log_messages.append(f"ERROR: 解析 {file_name} 时发生错误: {e}")
    except Exception as e:
        log_messages.append(f"ERROR: 处理 {file_name} 时发生意外错误: {e}")

# 是否开启监控资源按钮
@project.route('/file/IsOpenMonitor', methods=['POST'])
def is_open_monitor():
    """Toggle Res_CPU/Res_Memory resource monitoring for every scenario of a project.

    When enabling, each scenario's jmx is validated concurrently via
    process_scenario; when disabling, both listeners are added to the
    modifications.disable list. Findings are written to the project log and
    the updated YAML is saved back.
    """
    try:
        body = request.json
        root = get_global_config()['rootPath']
        name = body["projectName"]
        yaml_path = os.path.join(root, name, f'{name}.yaml')
        is_open = body['isMonitor']
        project_log = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs", f"{name}.log")

        log_messages = []

        # Load the project's bzt configuration.
        with open(yaml_path, 'r', encoding='utf-8') as fh:
            config = yaml.safe_load(fh)

        # Compile once; shared by every scenario worker.
        collector_re = re.compile(
            r'<kg\.apc\.jmeter\.perfmon\.PerfMonCollector(.*?)</kg\.apc\.jmeter\.perfmon\.PerfMonCollector>',
            re.DOTALL)
        addr_re = re.compile(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b')

        scenarios = config.get('scenarios', {})
        if is_open:
            with ThreadPoolExecutor(max_workers=10) as pool:
                futures = [
                    pool.submit(process_scenario, sc_name, sc_details, root, name,
                                log_messages, is_open, collector_re, addr_re)
                    for sc_name, sc_details in scenarios.items()
                ]
                for fut in futures:
                    try:
                        fut.result()  # propagate worker exceptions into the log
                    except Exception as e:
                        log_messages.append(f"ERROR: 处理场景时发生错误: {e}")

        else:
            # Disable monitoring by listing both listeners in every scenario.
            for sc_details in scenarios.values():
                sc_details.setdefault('modifications', {})['disable'] = ['Res_CPU', 'Res_Memory']

        # Write all findings to the project log in one pass.
        with open(project_log, 'w', encoding='utf-8') as log_file:
            log_file.write('\n'.join(log_messages))

        # Persist the updated YAML.
        with open(yaml_path, 'w', encoding='utf-8') as fh:
            yaml.dump(config, stream=fh, allow_unicode=True)

    except Exception as err:
        return handle_exception(err, "程序异常")
    else:
        return handle_success(is_open, "success")

# def is_open_monitor():
#     try:
#         params = request.json
#         root_path = get_global_config()['rootPath']
#         yaml_path = os.path.join(root_path, params["projectName"], params["projectName"] + '.yaml')
#         is_open = params['isMonitor']
#
#         project_log = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs",
#                                    params["projectName"] + ".log")
#
#         if os.path.exists(project_log):  # 是否已经存在项目日志文件，是则清空
#             with open(project_log, "a+") as file:
#                 file.truncate(0)
#
#         # 读取 YAML 文件
#         with open(yaml_path, 'r', encoding='utf-8') as file:
#             data = yaml.safe_load(file)
#         # file_names = [entry['scenario'] + '.jmx' for entry in data['execution']]
#
#         # 检查并修改 disable 字段
#         for scenario_name, scenario_details in data['scenarios'].items():
#             if is_open:
#                 print(scenario_name)
#                 file_name = scenario_name + '.jmx'
#                 jmx_path = os.path.join(root_path, params["projectName"], "testplans", file_name)
#
#                 if os.path.exists(jmx_path):
#                     tree = ET.parse(jmx_path)  # 解析jmx文件
#                     root = tree.getroot()  # 获取根节点
#
#                     has_perfmon_plugin = False
#                     has_res_cpu = False
#                     has_res_memory = False
#
#                     # 检查是否存在 Res_CPU 和 Res_Memory 组件
#                     for element in root.findall(
#                             './/kg.apc.jmeter.perfmon.PerfMonCollector'):  # 查找所有的kg.apc.jmeter.perfmon.PerfMonCollector节点
#                         has_perfmon_plugin = True
#                         testname = element.get('testname')
#                         if testname == 'Res_CPU':
#                             has_res_cpu = True
#                         elif testname == 'Res_Memory':
#                             has_res_memory = True
#
#                     # 写入日志并修改 disable 字段
#                     with open(project_log, "a+", encoding="utf-8") as file:
#                         if not has_perfmon_plugin:
#                             file.write(f"ERROR: {file_name}开启资源监控失败，脚本中缺少PerMon Metrics Collector插件\n")
#                         else:
#                             if has_res_cpu and has_res_memory:
#                                 # 打开JMX文件并读取内容
#                                 with open(jmx_path, 'r', encoding='utf-8') as jmx_file:
#                                     jmx_content = jmx_file.read()
#                                 # 使用正则表达式匹配<collectionProp name="metricConnections">标签内的内容
#                                 pattern = r'<kg.apc.jmeter.perfmon.PerfMonCollector(.*?)</kg.apc.jmeter.perfmon.PerfMonCollector>'
#                                 matches = re.search(pattern, jmx_content, re.DOTALL)
#                                 if matches:
#                                     metric_connections_content = matches.group(1)
#                                     # 使用正则表达式匹配IP地址
#                                     ip_addresses = set(
#                                         re.findall(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b', metric_connections_content))
#
#                                     # 定义要检查的端口号
#                                     port = 4444
#
#                                     has_agent_server = True
#                                     # 遍历唯一的IP地址
#                                     for ip in ip_addresses:
#                                         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
#                                             s.settimeout(2)
#                                             result = s.connect_ex((ip, port))
#
#                                         # 检查telnet命令的退出代码
#                                         if result == 0:
#                                             file.write(
#                                                 f"检测到{file_name}中设置的待监控服务器{ip}的agentServer已开启， {file_name}成功开启资源监控\n")
#                                         else:
#                                             file.write(
#                                                 f"WARNING:检测到{file_name}中设置的待监控服务器【{ip}】的agentServer未开启，{file_name}开启资源监控失败！\n")
#                                             has_agent_server = False
#                                 else:
#                                     print("未找到<collectionProp name=\"metricConnections\">标签")
#                             if not has_res_cpu:
#                                 file.write(f"ERROR: {file_name}开启资源监控失败，脚本中缺少Res_CPU\n")
#                             if not has_res_memory:
#                                 file.write(f"ERROR: {file_name}开启资源监控失败，脚本中缺少Res_Memory\n")
#
#                     if has_perfmon_plugin and has_res_cpu and has_res_memory and has_agent_server:
#                         scenario_details['modifications']['disable'] = []
#                 else:
#                     # 写入日志并修改 disable 字段
#                     with open(project_log, "a+", encoding="utf-8") as file:
#                         file.write(f"ERROR: {file_name}不存在\n")
#
#             else:
#                 scenario_details['modifications']['disable'] = ['Res_CPU', 'Res_MEM']
#         with open(yaml_path, 'w', encoding='utf-8') as file:
#             yaml.dump(data, stream=file, allow_unicode=True)
#
#     except Exception as err:  # 捕获异常
#         return handle_exception(err, "程序异常")
#     else:
#         return handle_success(is_open, "success")


# 获取监控资源按钮状态
@project.route('/file/getMonitorStatus', methods=['POST'])
def get_monitor_status():
    """Report whether resource monitoring is enabled for a project.

    Reads the project's YAML and inspects the first scenario's
    modifications.disable list: empty list -> monitoring on; a list containing
    Res_CPU/Res_Memory -> monitoring off; a non-list value is reported as a
    configuration error. All scenarios are toggled together by IsOpenMonitor,
    so inspecting the first one is sufficient.
    """
    try:
        params = request.json
        root_path = get_global_config()['rootPath']
        yaml_path = os.path.join(root_path, params["projectName"], params["projectName"] + '.yaml')

        # 读取 YAML 文件
        with open(yaml_path, 'r', encoding='utf-8') as file:
            data = yaml.safe_load(file)

        # Return based on the first scenario encountered (see docstring).
        for scenario, config in data.get('scenarios', {}).items():
            disable_components = config.get('modifications', {}).get('disable', [])
            if not disable_components:
                return handle_success({"status": True}, "开启资源监控")
            if isinstance(disable_components, list):
                if "Res_CPU" in disable_components or "Res_Memory" in disable_components:
                    return handle_success({"status": False}, "关闭资源监控")
                return handle_success({"status": True}, "开启资源监控")
            return handle_success({"status": False}, "禁用组件信息错误")
        # Bug fix: a YAML with no scenarios previously fell through and the view
        # returned None, which Flask rejects; report monitoring as disabled instead.
        return handle_success({"status": False}, "关闭资源监控")
    except Exception as err:
        return handle_exception(err, "程序异常")


# 进入执行测试的页面时查询YAML文件中distributed的值
@project.route('/file/getDistributedValues', methods=['POST'])
def get_distributed_values():
    """Return the first truthy 'distributed' value from the project YAML's execution list."""
    try:
        body = request.json
        name = body["projectName"]
        yaml_file = os.path.join(get_global_config()['rootPath'], name, name + '.yaml')

        with open(yaml_file, 'r', encoding='utf-8') as fh:
            config = yaml.safe_load(fh)

        # First execution entry carrying a truthy 'distributed' field wins;
        # None when no entry defines one.
        distributed_values = next(
            (entry.get('distributed') for entry in config.get('execution', [])
             if entry.get('distributed')),
            None,
        )
    except Exception as err:
        return handle_exception(err, "程序异常")
    else:
        return handle_success(distributed_values, "success")

# 获取项目可用的测试机
@project.route('/getUseableSlaveIp', methods=['POST'])
def get_useable_slave_ip():
    """Return the project's bound slave IPs that are enabled (status = 1)
    and not currently selected (is_select = 0), comma-joined; '' when the
    project has no bound IPs (or does not exist)."""
    try:
        params = request.json
        project_name = params.get("projectName")
        query = "SELECT slaver_ip FROM project WHERE project_name= %s"
        results = db_handler.execute_query(query, (project_name,))
        logger.info("slaver_ip rows for project %s: %s", project_name, results)
        # Bug fix: results[0] raised IndexError when the project row was
        # missing; guard both "no row" and "empty slaver_ip".
        slaver_ips = results[0].get("slaver_ip") if results else None
        if not slaver_ips:
            slaver_ip = ''
        else:
            ip_list = [ip.strip() for ip in slaver_ips.split(',')]
            format_strings = ','.join(['%s'] * len(ip_list))
            # Filter out machines that are disabled or already selected.
            query = f"SELECT ip FROM slaver WHERE ip IN ({format_strings}) AND status = %s AND is_select = 0"
            useable_slaver_ip = db_handler.execute_query(query, (*ip_list, 1))
            slaver_ip = ", ".join(row['ip'] for row in useable_slaver_ip)

    except Exception as err:  # 捕获异常
        return handle_exception(err, "程序异常")
    else:
        return handle_success(slaver_ip, "查询成功")
# 查询启用状态的测试机
# 测试配置页面-点击选择测试机按钮-
@project.route('/file/getActiveMachines', methods=['POST'])
def get_active_machines():
    """Return every slave machine row whose status is enabled (status = 1).

    NOTE(review): an earlier (commented-out) revision filtered by the
    project's bound slaver_ip and by is_select = 0; the current behavior
    deliberately ignores project binding and selection state — confirm this
    is intended before re-adding the filter.
    """
    try:
        query = "SELECT *  FROM slaver WHERE status= %s"
        active_machines = db_handler.execute_query(query, (1,))
    except Exception as err:  # 捕获异常
        return handle_exception(err, "程序异常")
    else:
        return handle_success(active_machines, "查询成功")


def get_machine_credentials(ip):
    """Look up SSH credentials for a collection of slave IPs.

    Args:
        ip: iterable of IP address strings.

    Returns:
        list of (username, password) tuples, one per IP that has a row in
        the slaver table. IPs without a row — or whose query raised — are
        silently skipped, so the result may be shorter than the input.
    """
    credentials = []
    query_sql = "SELECT username,password  FROM slaver WHERE ip= %s "
    for single_ip in ip:
        try:
            res = db_handler.execute_query(query_sql, (single_ip,))
            if res:
                credentials.append((res[0]["username"], res[0]["password"]))
        except Exception as e:
            # Bug fix: the old message interpolated the whole list ({ip})
            # instead of the failing address; also log instead of print.
            logger.error(f"An error occurred while retrieving credentials for IP {single_ip}: {e}")
    return credentials


class NoAliasDumper(yaml.SafeDumper):
    """SafeDumper variant that never emits YAML anchors/aliases.

    Repeated references to the same object are written out inline instead of
    as `&anchor` / `*alias` pairs, keeping the generated YAML readable.
    """

    def ignore_aliases(self, data):
        # Declare every node alias-free so the dumper always inlines it.
        return True

# 修改YAML中distributed的值
# 点击选择测试机-弹框-选择-确定
# 参数：projectName、distributedValue
# 返回值：{}
@project.route('/file/updateDistributedValue', methods=['POST'])
def update_distributed_value():
    """Write the chosen slave IPs into every execution item's 'distributed'
    field of the project YAML, then push the project's parameter files to
    each slave over SFTP.

    Expects JSON: {"projectName": ..., "distributedValue": [ip, ...]}.
    Returns the distributed value on success, an error response when a
    slave's jmeter-server is not running.
    """
    try:
        params = request.json
        root_path = get_global_config()['rootPath']
        project_dir = os.path.join(root_path, params["projectName"])
        path = os.path.join(project_dir, params["projectName"] + '.yaml')

        with open(path, 'r', encoding='utf-8') as file:
            data = yaml.safe_load(file)

        # Stamp the new distributed value onto every execution item.
        for execution_item in data.get('execution', []):
            execution_item['distributed'] = params['distributedValue']

        with open(path, 'w', encoding='utf-8') as file:
            # NoAliasDumper keeps the repeated IP lists from being emitted
            # as YAML anchors/aliases.
            yaml.dump(data, file, Dumper=NoAliasDumper, allow_unicode=True, sort_keys=False)

        ips = params['distributedValue']
        # NOTE(review): get_machine_credentials skips IPs without a DB row,
        # so zip(ips, cycle(credentials)) can mispair credentials with IPs —
        # confirm every selectable IP always has a slaver record.
        credentials = get_machine_credentials(ips)
        current_user = session['loginname']  # 当前操作人
        # NOTE(review): this log line includes passwords; consider redacting.
        logger.info(f"操作人{current_user}：选择分布式压测机：{credentials}")

        # Parameter-file extensions ('.jpge' kept for backward compatibility
        # with already-uploaded files; '.jpeg' added as the correct spelling).
        data_exts = ('.txt', '.csv', '.dat', '.pdf', '.png', '.jpge', '.jpeg', '.jpg',
                     '.xlsx', '.docx', '.doc', '.xls')

        # 上传文件到指定服务器的指定路径下
        for ip, (username, password) in zip(ips, cycle(credentials)):
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(ip, port=22, username=username, password=password)
            try:
                output = execute_command_linux(ssh, 'ps -ef | grep jmeter-server | grep -v grep')
                if './jmeter-server' not in output:
                    # Bug fix: the connection is now closed (finally) on this
                    # early-return path as well.
                    return handle_exception("jmeter-server 未启动", f"jmeter-server 未启动，IP地址: {ip}")
                sftp = ssh.open_sftp()
                try:
                    local_testplans_dir = os.path.join(project_dir, 'testplans')
                    for root, dirs, files in os.walk(local_testplans_dir):
                        for file_name in files:
                            if file_name.endswith(data_exts):
                                logger.info(f"操作人{current_user}：参数化文件：{file_name}")
                                # Bug fix: join against the walked directory
                                # (root), not the top-level dir, so files in
                                # subfolders resolve correctly.
                                local_file_path = os.path.join(root, file_name)
                                remote_file_path = '/home/apache-jmeter-5.2.1/bin/' + file_name
                                sftp.put(local_file_path, remote_file_path)
                finally:
                    sftp.close()
            finally:
                ssh.close()

    except Exception as err:  # 捕获异常
        return handle_exception(str(err), "程序异常")
    else:
        return handle_success(params['distributedValue'], "添加测试机成功")


# 获取YAML配置
@project.route('/file/getYamlConfig', methods=['POST'])
def get_yaml_config():
    """Return a trimmed view of the project YAML: settings.env.Runtime plus,
    for each execution item, its scenario and load parameters.

    'ramp-up' is normalized to its leading integer (e.g. "30s" -> 30).
    """
    try:
        params = request.json
        root_path = get_global_config()['rootPath']
        project_dir = os.path.join(root_path, params["projectName"])
        if not os.path.exists(root_path):
            return handle_exception("根路径错误", "全局配置错误，请联系管理员进行配置")
        if not params.get('projectName'):
            return handle_exception("error", "还未创建项目")
        path = os.path.join(project_dir, params["projectName"] + '.yaml')
        if not os.path.isfile(path):
            return handle_exception("error", "yaml文件不存在")
        with open(path, 'r', encoding='utf-8') as file:
            data = yaml.safe_load(file)

        runtime = data['settings']['env']['Runtime']
        result = {
            'settings': {
                'env': {
                    'Runtime': runtime
                }
            },
            'execution': []
        }
        for execution in data.get('execution', []):
            # Extract the leading integer from ramp-up strings such as "30s".
            ramp_up_value = int(re.search(r'\d+', execution['ramp-up']).group())
            result['execution'].append({
                'scenario': execution['scenario'],
                'concurrency': execution['concurrency'],
                'ramp-up': ramp_up_value,
                'targettime': execution['targettime'],
                'lastavgtime': execution['lastavgtime'],
                'lasttps': execution['lasttps'],
                'hold-for': execution['hold-for'],
                'distributed': execution['distributed']
            })
        # Bug fix: the old log message contained a literal, never-formatted
        # "{lock_user}" placeholder and mislabelled the payload as a filename.
        logger.info("yaml config result: %s (%s)", result, type(result))
    except Exception as err:  # 捕获异常
        return handle_exception(err, "程序异常")
    else:
        return handle_success(result, "success")

# 执行时根据勾选的场景修改YAML文件
@project.route('/file/generateNewYaml', methods=['POST'])
def generate_new_yaml():
    """Build <project>_run.yaml containing only the execution items whose
    scenario was ticked by the user; every other section is copied as-is."""
    try:
        params = request.json
        root_path = get_global_config()['rootPath']
        project_name = params["projectName"]
        project_dir = os.path.join(root_path, project_name)
        yaml_path = os.path.join(project_dir, f"{project_name}.yaml")
        yaml_run_path = os.path.join(project_dir, f"{project_name}_run.yaml")

        # Load the full project configuration.
        with open(yaml_path, "r", encoding='utf-8') as old_file:
            yaml_content = yaml.safe_load(old_file)

        selected = params.get("scenarioNames", [])

        # Shallow copy keeps untouched top-level sections identical; only the
        # execution list is replaced by the selected subset.
        new_yaml_content = dict(yaml_content)
        new_yaml_content['execution'] = [
            item for item in yaml_content.get('execution', [])
            if item.get("scenario") in selected
        ]

        with open(yaml_run_path, "w", encoding='utf-8') as new_file:
            yaml.safe_dump(new_yaml_content, new_file, default_flow_style=False,
                           allow_unicode=True, sort_keys=False)

    except Exception as err:
        return handle_exception(err, "程序异常")
    else:
        return handle_success(new_yaml_content, "success")

# 传参：projectName
# 返回值：data:"10.58.20.17"
@project.route('/file/getUserIP', methods=['POST'])
def get_user_ip():
    """Return the caller's real client IP as seen through the reverse proxy.

    Reads the X-Real-IP header set by the front proxy; the value may be None
    when the request does not pass through the proxy.
    """
    try:
        params = request.json
        # projectName is still required by the request contract even though
        # the (commented-out) JSON bookkeeping that used it was removed; the
        # KeyError below preserves the old rejection of malformed requests.
        project_name = params["projectName"]
        client_ip = request.headers.get('X-Real-IP')  # 也可以尝试 'X-Forwarded-For'

    except Exception as err:  # 捕获异常
        logger.error(f"getUserIP failed: {err}")
        # Bug fix: str(err) — raw exception objects are not JSON-serializable.
        return MyResponse({"err": str(err)}, False, "error").post()
    else:
        logger.info("User IP Address: %s", client_ip)
        return handle_success(client_ip, "success")

# 传参：projectName
# 返回值：data:"10.58.20.17"
@project.route('/file/getExecutor', methods=['POST'])
def get_executor():
    """Return the run_user of the project's currently running task."""
    try:
        params = request.json
        project_name = params.get("projectName")
        query_sql = ("SELECT run_user FROM taskrun t WHERE project_id ="
                     "(SELECT id FROM project WHERE project_name= %s ) and status= 'running'")
        rows = db_handler.execute_query(query_sql, (project_name,))
        # Bug fix: rows[0] raised IndexError when no task is running,
        # surfacing as an opaque error; report it explicitly instead.
        if not rows:
            return handle_exception("未找到运行中的任务", "该项目当前没有运行中的任务")
        executor = rows[0]['run_user']

    except Exception as err:  # 捕获异常
        logger.error(f"getExecutor failed: {err}")
        return MyResponse({"err": str(err)}, False, "error").post()
    else:
        # Bug fix: the old print mislabelled the executor as "User IP Address".
        logger.info("executor for %s: %s", project_name, executor)
        return handle_success(executor, "success")

# 配置页-获取项目已绑定测试机IP
@project.route('/file/getBindSlaveIp', methods=['POST'])
def get_bind_slave_ip():
    """Return the comma-separated slave IP string bound to the project
    ('' when the project is missing or has no bound IPs)."""
    try:
        params = request.json
        project_name = params.get("projectName")
        query = "SELECT slaver_ip FROM project WHERE project_name= %s"
        results = db_handler.execute_query(query, (project_name,))
        logger.info("bind slave ip rows for %s: %s", project_name, results)

        # '' covers both "project row missing" and "slaver_ip NULL/empty".
        if not results or not results[0].get('slaver_ip'):
            slaver_ip = ''
        else:
            slaver_ip = results[0]['slaver_ip']
    except Exception as err:  # 捕获异常
        return handle_exception(err, "程序异常")
    else:
        return handle_success(slaver_ip, "查询成功")
# 配置页-绑定测试机
@project.route('/file/bindSlaveIP', methods=['POST'])
def bind_slave_ip():
    """Persist the selected slave IPs (comma-joined) onto the project row."""
    try:
        params = request.json
        project_name = params.get("projectName")
        slave_ips = params["slaverIp"]  # 获取传递的 slaveIP 数组

        # An empty selection is stored as the empty string.
        slave_ips_str = ','.join(slave_ips) if slave_ips else ''

        update_sql = "UPDATE project SET slaver_ip = %s WHERE project_name = %s"
        db_handler.execute_update(update_sql, (slave_ips_str, project_name))

    except Exception as err:  # 捕获异常
        print(err)  # 打印异常信息
        res = MyResponse({"err": str(err)}, False, "error").post()
        return res
    else:
        print(f"Updated slave IPs for project '{project_name}': {slave_ips_str}")
        return handle_success(slave_ips, "绑定成功")

# def tail(f, n=10):
#     with open(f, 'rb') as file:
#         file.seek(0, os.SEEK_END)
#         pointer = file.tell()
#         lines = []
#         while pointer >= 0 and len(lines) < n:
#             file.seek(pointer)
#             pointer -= 1
#             if file.read(1) == b'\n':
#                 lines.append(file.readline().decode())
#         return ''.join(reversed(lines))

def get_last_n_lines(file_path, n, encodings=None):
    """Return (up to) the last *n* lines of *file_path*, newest line first,
    joined with newlines.

    The file is read backwards in fixed-size chunks so large logs are never
    loaded whole. The accumulated bytes are decoded with the first encoding
    in *encodings* that succeeds (default: utf-8, gbk, iso-8859-1).

    Raises:
        ValueError: when no provided encoding can decode the data.
        OSError: propagated from the underlying file operations.
    """
    if encodings is None:
        encodings = ['utf-8', 'gbk', 'iso-8859-1']

    chunk_size = 1024 * 10
    with open(file_path, 'rb') as file:
        file.seek(0, os.SEEK_END)
        pos = file.tell()
        buffer = b''
        # Pull earlier chunks until we hold more than n newlines (or hit the
        # start of the file). Bug fix: the previous version seeked relative
        # to the post-read position (SEEK_CUR), so every iteration re-read
        # the same tail chunk and produced duplicated lines; it also split
        # each chunk independently, breaking lines that spanned a boundary.
        while pos > 0 and buffer.count(b'\n') <= n:
            amount = min(chunk_size, pos)
            pos -= amount
            file.seek(pos)
            buffer = file.read(amount) + buffer

    text = None
    for encoding in encodings:
        try:
            text = buffer.decode(encoding)
            break
        except UnicodeDecodeError:
            continue
    if text is None:
        raise ValueError("Could not decode the file using any of the provided encodings.")

    # Newest-first ordering, matching the original output contract. If the
    # buffer starts mid-line, that partial line sits beyond index n in the
    # reversed list and is dropped by the slice.
    return '\n'.join(text.split('\n')[::-1][:n])

@project.route('/getJmeterLog', methods=['POST'])
def get_jmeter_log():
    """Return the last 1000 lines of jmeter.log from the most recent run
    directory under <rootPath>/<projectName>/TaurusRunlog."""
    try:
        params = request.json
        root_path = get_global_config()['rootPath']
        log_dir = os.path.join(root_path, params["projectName"], "TaurusRunlog")
        if not os.path.exists(log_dir):
            return handle_exception("error", "该项目尚未产生日志")
        with os.scandir(log_dir) as entries:
            # 筛选出所有子目录
            dirs = [entry.name for entry in entries if entry.is_dir()]

        # Run directories are named by their start timestamp; newest first.
        dir_times = [(dir_name, datetime.strptime(dir_name, '%Y-%m-%d-%H%M%S')) for dir_name in dirs]
        dir_times.sort(key=lambda x: x[1], reverse=True)
        timedir = dir_times[0][0] if dir_times else None
        # Bug fix: a None timedir previously crashed os.path.join with a
        # TypeError that surfaced as a generic "程序异常".
        if timedir is None:
            return handle_exception("error", "该项目尚未产生日志")
        logger.info("latest run dir: %s", timedir)
        jmeter_log = os.path.join(log_dir, timedir, "jmeter.log")

        content = get_last_n_lines(jmeter_log, 1000)

    except Exception as err:  # 捕获异常
        return handle_exception(err, "程序异常")
    else:
        return handle_success(content, "success")



@project.route('/getTaurusLog', methods=['POST'])
def get_taurus_log():
    """Return the last 1000 lines of bzt.log from the most recent run
    directory under <rootPath>/<projectName>/TaurusRunlog."""
    try:
        params = request.json
        root_path = get_global_config()['rootPath']
        log_dir = os.path.join(root_path, params["projectName"], "TaurusRunlog")
        if not os.path.exists(log_dir):
            return handle_exception("error", "该项目尚未产生日志")
        with os.scandir(log_dir) as entries:
            # 筛选出所有子目录
            dirs = [entry.name for entry in entries if entry.is_dir()]

        # Run directories are named by their start timestamp; newest first.
        dir_times = [(dir_name, datetime.strptime(dir_name, '%Y-%m-%d-%H%M%S')) for dir_name in dirs]
        dir_times.sort(key=lambda x: x[1], reverse=True)
        timedir = dir_times[0][0] if dir_times else None
        # Bug fix: a None timedir previously crashed os.path.join with a
        # TypeError that surfaced as a generic "程序异常".
        if timedir is None:
            return handle_exception("error", "该项目尚未产生日志")
        logger.info("latest run dir: %s", timedir)
        taurus_log = os.path.join(log_dir, timedir, "bzt.log")

        content = get_last_n_lines(taurus_log, 1000)

    except Exception as err:  # 捕获异常
        return handle_exception(err, "程序异常")
    else:
        return handle_success(content, "success")

# 下载jmeter日志
@project.route('/downloadJmeterLog', methods=['POST'])
def download_jmeter_log():
    """Send jmeter.log from the most recent run directory as an attachment."""
    try:
        params = request.json
        root_path = get_global_config()['rootPath']
        log_dir = os.path.join(root_path, params["projectName"], "TaurusRunlog")
        if not os.path.exists(log_dir):
            return handle_exception("error", "该项目尚未产生日志")
        with os.scandir(log_dir) as entries:
            # 筛选出所有子目录
            dirs = [entry.name for entry in entries if entry.is_dir()]

        # Run directories are named by their start timestamp; newest first.
        dir_times = [(dir_name, datetime.strptime(dir_name, '%Y-%m-%d-%H%M%S')) for dir_name in dirs]
        dir_times.sort(key=lambda x: x[1], reverse=True)
        timedir = dir_times[0][0] if dir_times else None
        # Bug fix: a None timedir previously crashed os.path.join with a
        # TypeError instead of reporting the missing logs.
        if timedir is None:
            return handle_exception("error", "该项目尚未产生日志")
        logger.info("latest run dir: %s", timedir)
        jmeter_log = os.path.join(log_dir, timedir, "jmeter.log")
        res = send_file(jmeter_log, as_attachment=True)
    except Exception as err:  # 捕获异常
        logger.error(f"downloadJmeterLog failed: {err}")
        # Bug fix: str(err) — raw exception objects are not JSON-serializable.
        return MyResponse({"err": str(err)}, False, "error").post()
    else:
        return res

# 下载taurus日志
@project.route('/downloadTaurusLog', methods=['POST'])
def download_taurus_log():
    """Send bzt.log (Taurus log) from the most recent run directory as an
    attachment."""
    try:
        params = request.json
        root_path = get_global_config()['rootPath']
        log_dir = os.path.join(root_path, params["projectName"], "TaurusRunlog")
        if not os.path.exists(log_dir):
            return handle_exception("error", "该项目尚未产生日志")
        with os.scandir(log_dir) as entries:
            # 筛选出所有子目录
            dirs = [entry.name for entry in entries if entry.is_dir()]

        # Run directories are named by their start timestamp; newest first.
        dir_times = [(dir_name, datetime.strptime(dir_name, '%Y-%m-%d-%H%M%S')) for dir_name in dirs]
        dir_times.sort(key=lambda x: x[1], reverse=True)
        timedir = dir_times[0][0] if dir_times else None
        # Bug fix: a None timedir previously crashed os.path.join with a
        # TypeError instead of reporting the missing logs.
        if timedir is None:
            return handle_exception("error", "该项目尚未产生日志")
        logger.info("latest run dir: %s", timedir)
        # Renamed: the old local was misleadingly called jmeter_log.
        taurus_log = os.path.join(log_dir, timedir, "bzt.log")
        res = send_file(taurus_log, as_attachment=True)
    except Exception as err:  # 捕获异常
        logger.error(f"downloadTaurusLog failed: {err}")
        # Bug fix: str(err) — raw exception objects are not JSON-serializable.
        return MyResponse({"err": str(err)}, False, "error").post()
    else:
        return res