from datetime import datetime
import os
from openpyxl import load_workbook
import re
import sys
import concurrent.futures
from sinotrans.core import FileParser,ExcelProcessor,EmlParser
from sinotrans.utils import Logger,GlobalThreadPool, ProgressManager

# Path configuration, anchored at this script's directory via os.path.abspath(__file__).
# When packaged (e.g. PyInstaller), replace the anchor with:
#   root_path3 = os.path.dirname(os.path.realpath(sys.executable))
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")  # used to make the output filename unique
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))   
TARGET_PATH = os.path.join(CURRENT_DIR, "target")   # generated output workbooks
EMAIL_PATH = os.path.join(CURRENT_DIR, "email")     # .eml files to parse
CONFIG_PATH = os.path.join(CURRENT_DIR, "conf")     # mapping configuration files
DEBUG_PATH = os.path.join(CURRENT_DIR, "logs")      # log output directory
# File path configuration
RESOURCE_FILE = os.path.join(CURRENT_DIR, "input_data.xlsx")
TEMPLATE_FILE = os.path.join(CURRENT_DIR, "template.xlsx")
TARGET_FILE = os.path.join(TARGET_PATH, f"output_result_{timestamp}.xlsx")
MAPPING_FILE = os.path.join(CONFIG_PATH, "mapping.txt")
FIXED_MAPPING_FILE = os.path.join(CONFIG_PATH, "fixed_mapping.txt")
EMAIL_MAPPING_FILE = os.path.join(CONFIG_PATH, "email_mapping.txt")
# Canonical container types produced by process_fields ("HC"->"HQ", "GC"/"STD"->"GP").
CONTAINER_TYPES = ["20GP", "40GP", "40HQ"]
PO_NAME="PO号"
CONTAINER_TYPE_NAME="集装箱类型"

# Initialize the global logger (writes into DEBUG_PATH).
Logger(debug_path=DEBUG_PATH)

# Make sure the working directories exist before anything tries to use them.
FileParser.ensure_directories_exist(directories = [
    TARGET_PATH,
    EMAIL_PATH,
    CONFIG_PATH,
])
# Shared thread pool used by process_resource_data to fan out row processing.
GlobalThreadPool.initialize(
    max_workers=16,
    thread_name_prefix='AutoTOThreadPool',
    initializer=lambda: Logger.debug("AutoTOThreadPool initialized"),
    initargs=()
)
def process_fields(row_data, global_po_mapping):
    """Expand one source row into multiple new rows, one per container.

    The email-derived mapping for the row's PO contains a container string
    such as ``"40HQ*2,20GP*1"``: tokens separated by commas, each a type
    followed by one separator character and a trailing count.  For every
    token, ``count`` copies of the source row are emitted with the
    normalized container type ("HC"->"HQ", "GC"->"GP", "STD"->"GP").

    Args:
        row_data: dict-like source row; must contain PO_NAME.
        global_po_mapping: PO -> email-field dict.  NOTE: the container
            entry is pop()ed, i.e. consumed — a second source row with the
            same PO will raise (presumably intended as a duplicate guard;
            confirm against the input data).

    Returns:
        List of new row dicts; [] when the PO is not in the mapping.

    Raises:
        ValueError: when the PO has no associated container type.
    """
    try:
        po = str(row_data[PO_NAME])
        if po not in global_po_mapping:
            return []
        rows_to_add = []
        container_str = global_po_mapping[po].pop(CONTAINER_TYPE_NAME, None)
        if container_str is None:
            raise ValueError(f"❌ 未找到与 {po} 相关联的集装箱类型！")
        for type_str in container_str.split(","):
            match = re.search(r'(\d+)$', type_str)
            if match:
                quantity = int(match.group(1))
                # Everything before the trailing digits, minus the one
                # separator character between type and count.
                type_part = type_str[:match.start(1)][:-1]
            else:
                # BUGFIX: the original silently reused quantity/type from the
                # previous token here (NameError on the first token).  Treat a
                # token without a count as a single container of that type.
                quantity = 1
                type_part = type_str
            container_type = type_part.replace("HC", "HQ").replace("GC", "GP").replace("STD", "GP")
            for _ in range(quantity):
                new_row = row_data.copy()
                new_row[CONTAINER_TYPE_NAME] = container_type
                rows_to_add.append(new_row)
    except Exception as e:
        print(f"❌ 根据原行数据，生成新行数据时发生错误: {str(e)}")
        raise
    return rows_to_add
def map_fields(generated_rows, column_mapping, fixed_mapping, email_mapping, global_po_mapping):
    """Build the template-shaped row for each generated row.

    Merges, in order of increasing precedence: fixed template values,
    source-column values, email-derived values, and finally the container
    type (20GP / 40GP / 40HQ) computed during row expansion.
    """
    mapped_rows = []
    for source_row in generated_rows:
        merged = {
            **ExcelProcessor.fixed_mapping(fixed_mapping),
            **ExcelProcessor.column_mapping(source_row, column_mapping),
            **ExcelProcessor.email_mapping(source_row, PO_NAME, global_po_mapping, email_mapping),
            # Container type: 20GP / 40GP / 40HQ
            CONTAINER_TYPE_NAME: source_row[CONTAINER_TYPE_NAME],
        }
        mapped_rows.append(merged)
    return mapped_rows
def process_resource_row(row_data, ns_output, column_mapping, fixed_mapping, email_mapping, global_po_mapping):
    """Process one source row: expand it by container type, map the result
    onto the template columns, and sort by the output sheet's column order.

    Returns [] when the row has no PO number; re-raises any processing error
    after logging it.  (The original had this docstring as a bare string
    statement in the middle of the body, where it was a no-op.)
    """
    try:
        if not row_data[PO_NAME]:  # guard against None or empty string
            Logger.debug("❌ PO号不能为空！")
            return []
        generated_rows = process_fields(row_data, global_po_mapping)
        mapped_rows = map_fields(generated_rows, column_mapping, fixed_mapping, email_mapping, global_po_mapping)
        sorted_rows = ExcelProcessor.sort_generated_rows(mapped_rows, ns_output)
        Logger.debug(f"-PO号：{row_data[PO_NAME]}处理完成......")
        return sorted_rows
    except Exception as e:
        Logger.error(f"❌ 处理源行数据失败: {str(e)}")
        raise
def process_resource_data(ns_output, data_generator, progress, column_mapping, fixed_mapping, email_mapping, global_po_mapping):
    """Fan source rows out to the shared thread pool and collect the results.

    Submits one task per row from ``data_generator``, waits up to 60 s for
    completion, and concatenates the per-row results in submission order
    (order matters for the output sheet).

    Raises:
        TimeoutError: when tasks are still pending after the 60 s wait.
        Exception: re-raised from any failed worker task.
    """
    new_rows = []
    try:
        Logger.info("📋 正在处理资源数据...")
        # NOTE(review): assumes GlobalThreadPool.get_executor()'s context
        # manager does not permanently shut the shared pool down — confirm.
        with GlobalThreadPool.get_executor() as executor:
            futures = [
                executor.submit(process_resource_row, row, ns_output, column_mapping, fixed_mapping, email_mapping, global_po_mapping)
                for row in data_generator
            ]
        progress.close()
        # Wait for completion with a bounded timeout.
        done, not_done = concurrent.futures.wait(futures, timeout=60)
        if not_done:
            # BUGFIX: the original ignored not_done and called result() on
            # every future, blocking indefinitely and defeating the timeout.
            for pending in not_done:
                pending.cancel()
            raise TimeoutError(f"{len(not_done)} tasks did not finish within 60s")

        # Iterate futures (not the unordered `done` set) to keep row order.
        for future in futures:
            # result() re-raises any exception thrown inside the worker.
            result = future.result()
            if result:
                new_rows.extend(result)
        Logger.info(f"✅ 扫描到{len(futures)}行非空数据，生成 {len(new_rows)} 行数据")
        return new_rows
    except Exception as e:
        Logger.error(f"❌ 处理资源数据失败: {str(e)}")
        raise

def main():
    """Entry point: load mappings, parse emails, expand the source workbook's
    rows by container type, and save the result into a timestamped copy of
    the template.  On any failure a partially-written target file is removed.
    """
    try:
        # Two kinds of mappings: field-name mappings and fixed template values.
        column_mapping = FileParser.parse_mapping_dict_of_list(MAPPING_FILE, ':', '|', ',', '=')   # field-name mapping
        fixed_mapping = FileParser.parse_mapping_dict(FIXED_MAPPING_FILE, ':', '|', ',', '=')      # fixed template values
        email_mapping = FileParser.parse_mapping_dict_of_list(EMAIL_MAPPING_FILE, ':', '|', ',', '=')
        Logger.info("✅ 读取映射文件成功！")
    except Exception as e:
        Logger.error(f"❌ 映射文件读取失败: {str(e)}")
        return

    try:
        Logger.info("📩 开始解析邮件文件...")
        eml_parse = EmlParser(email_mapping, EMAIL_PATH)
        global_po_mapping = eml_parse.parse_eml_files(PO_NAME)
        Logger.info(f"✅ 邮件解析完成，共获取{len(global_po_mapping)}封邮件内容")
    except Exception as e:
        Logger.error(f"❌ 邮件解析失败: {str(e)}")
        # BUGFIX: the original fell through here with global_po_mapping
        # undefined, causing a confusing NameError later.
        return

    try:
        # Create the output workbook from the template and grab its active sheet.
        nf_output = FileParser.create_newfile_by_template(TEMPLATE_FILE, TARGET_FILE, list("CRD"))
        ns_output = nf_output.active
        Logger.info("✅ 创建模板文件")
        # Read-only mode streams values/headers without loading the whole file.
        Logger.info("📋 开始读取源数据......")
        wb_input = load_workbook(RESOURCE_FILE, read_only=True)
        rs_input = wb_input.active
        Logger.info(f"✅ 读取源数据成功！")
    except Exception as e:
        Logger.error(f"❌ 获取数据失败: {str(e)}")
        if os.path.exists(TARGET_FILE):
            os.remove(TARGET_FILE)
            Logger.error("❌ 获取数据失败，已删除目标文件。")
        return

    try:
        progress = ProgressManager()
        # Generator yields rows one at a time to avoid loading everything in memory.
        data_generator = ExcelProcessor.excel_row_generator(
            rs_input,
            RESOURCE_FILE,
            progress,
            required_columns=[PO_NAME]
        )
        new_rows = process_resource_data(ns_output, data_generator, progress, column_mapping, fixed_mapping, email_mapping, global_po_mapping)
        progress.close()
        # Release the read-only workbook's file handle now that reading is done.
        wb_input.close()
        Logger.info("✅ 处理完成！")
    except Exception as e:
        Logger.error(f"❌ 源数据处理失败: {str(e)}")
        if os.path.exists(TARGET_FILE):
            os.remove(TARGET_FILE)
            Logger.debug("❌ 源数据处理失败，已删除目标文件。")
        return

    try:
        # Plain loop instead of list(map(lambda ...)) — append is a side effect.
        for row in new_rows:
            ns_output.append(row)
        nf_output.save(TARGET_FILE)
        Logger.info(f"🎉 结果已保存至: {TARGET_FILE}")
    except Exception as e:
        Logger.error(f"❌ 结果保存失败: {str(e)}")
        if os.path.exists(TARGET_FILE):
            os.remove(TARGET_FILE)
            Logger.debug("❌ 结果保存失败，已删除目标文件。")
        return

if __name__ == "__main__":
    main()
    input("Press Enter to exit...")