# -*- coding: utf-8 -*-
import os
import json
import subprocess
import re
import time
import pandas as pd
# Run timestamp for naming outputs. No ':' in the format: colons are illegal
# in Windows filenames and this script writes to Windows paths throughout.
current_time = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))

def get_path(data_dir):
    """Return the full path of every entry directly inside *data_dir*.

    Args:
        data_dir: directory to list (not recursed into).

    Returns:
        List of ``data_dir``-joined paths, in ``os.listdir`` order.
    """
    paths = []
    for entry_name in os.listdir(data_dir):
        paths.append(os.path.join(data_dir, entry_name))
    return paths

# Template for the config.json consumed by client_for_test.exe.
# execute_algo() rewrites "input_path" for every input file before each run.
config_data = {
    "service_address": "192.168.8.101:50051",
    "input_path": "{}",  # placeholder - replaced with the current input file path
    "write_path": "D:\\ivision_data\\pack_ivision2.1_test3\\output",
    "algo_type": 0
}

# Run the external algorithm binary once per input file.
def execute_algo(file_paths, client_path):
    """Run client_for_test.exe once for every path in *file_paths*.

    For each input file, a config.json with "input_path" pointing at that
    file is written into *client_path*, the executable is launched with
    *client_path* as its working directory, and its stdout/stderr are printed.

    Args:
        file_paths: iterable of input file paths to feed to the algorithm.
        client_path: directory containing client_for_test.exe; config.json
            is (re)written there before every run.
    """
    exe_path = os.path.join(client_path, "client_for_test.exe")
    config_path = os.path.join(client_path, "config.json")
    for input_path in file_paths:
        # Build the per-run config without mutating the module-level template.
        run_config = dict(config_data, input_path=input_path)
        with open(config_path, "w") as f:
            json.dump(run_config, f, indent=4)
        # cwd= scopes the working directory to the child process; the original
        # os.chdir() leaked a process-wide cwd change into the rest of the script.
        process = subprocess.Popen([exe_path], cwd=client_path,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output, error = process.communicate()  # wait for the run to finish
        # errors="replace": undecodable console bytes must not abort the batch.
        print(f"Output: {output.decode('utf-8', errors='replace')}")
        print(f"Error: {error.decode('utf-8', errors='replace')}")


# Extract the required fields from the algorithm log files.
def cleanse_data(log_files, pattern_size, pattern_spacing, pattern_time):
    """Scan each log file and collect one metrics record per file.

    A record is produced only when all three patterns match; files missing
    any field, or with malformed matches, are skipped instead of crashing
    the whole batch (the original raised IndexError on malformed lines).

    Args:
        log_files: iterable of log file paths.
        pattern_size: regex whose first group is a comma-separated
            "layers,height,width" triple.
        pattern_spacing: regex whose first group is a space-separated
            spacing list (first = slice thickness, last two = pixel spacing).
        pattern_time: regex whose first group is the elapsed seconds.

    Returns:
        List of dicts keyed by the Chinese column headers used in the
        exported Excel sheet.
    """
    extracted_data = []
    for log_file in log_files:
        # utf-8 + errors="replace": the patterns are pure ASCII, so replacement
        # characters cannot break the matches, while a stray byte can no longer
        # abort the run with UnicodeDecodeError under the Windows locale codec.
        with open(log_file, "r", encoding="utf-8", errors="replace") as f:
            log_content = f.read()
        size_match = re.findall(pattern_size, log_content)
        spacing_match = re.findall(pattern_spacing, log_content)
        time_match = re.findall(pattern_time, log_content)
        if not (size_match and spacing_match and time_match):
            continue  # log lacks at least one of the three fields
        size_tmp = size_match[0].split(",")
        spacing_tmp = spacing_match[0].split(" ")
        # Guard against malformed lines so one bad log cannot crash the batch.
        if len(size_tmp) < 3 or len(spacing_tmp) < 2:
            continue
        extracted_data.append({
            "图像层数": size_tmp[0],
            "图像尺寸": size_tmp[1] + "x" + size_tmp[2],
            "层厚": spacing_tmp[0],
            "图像间距": spacing_tmp[-2] + "\\" + spacing_tmp[-1],
            "算法时间（S）": time_match[0]
        })
    return extracted_data

# Save the extracted records as an Excel spreadsheet.
def save_excel(extracted_data, excel_path):
    """Write *extracted_data* (a list of record dicts) to *excel_path*.

    Each dict becomes one row; dict keys become the column headers.
    The row index is not written.
    """
    pd.DataFrame(extracted_data).to_excel(excel_path, index=False)


if __name__ == '__main__':

    data_dir = "D:\\log"
    # # data_dir = r"D:\ivision_data\DATA_lung\1"
    target_dir = "D:\\test_tho_2024-03-22"
    file_paths = get_path(data_dir)
    print(file_paths)
    # Step 1 (currently disabled): run the algorithm over every input file,
    # then wait for the service to flush its logs.
    # execute_algo(file_paths,target_dir)
    # time.sleep(300)
    # Regex patterns for the three fields pulled out of each log file.
    pattern_size = r"image size: \((.*?)\)"
    pattern_spacing = r"image spacing: \[(.*?)\]"
    pattern_time = r"\[thoracic_surgery_wrapper\] used: (.*?)s"
    # Step 2: parse the service logs and export the metrics to Excel.
    file_paths = get_path("D:\\noahlog")
    extracted_data = cleanse_data(file_paths,pattern_size,pattern_spacing,pattern_time)
    save_excel(extracted_data, "resultsnoah1.xlsx")


