# main.py
import os
import time
import json  # 需要导入json

# 导入项目配置 (确保路径等已设置)
import config

# 导入各个步骤的运行函数
# 修改：process_all_text_files 现在从实际的 text_preprocessor 导入
from src.text_preprocessor import process_all_text_files  # , extract_sentences_from_file (如果main中也用)
from src.step1_keyword_expander import run_keyword_expansion_step, load_sentences
from src.step2_sentence_extractor import run_sentence_extraction_step  # 假设此步骤也需要调整
from src.step3_api_labeler import run_api_labeling_step
from src.step4_classifier_trainer import run_classifier_training_step
import logging
# 导入通用工具
from src.utils import setup_logger, get_all_files, load_json_file, \
    save_json_file  # load_json_file, save_json_file 可能不再直接用于all_sentences缓存

# Set up the main logger for the whole pipeline run (writes to main_project.log).
main_logger = setup_logger("main_project_runner", "main_project.log")
# Silence jieba's verbose INFO output (e.g. dictionary-loading messages).
jieba_logger = logging.getLogger('jieba')
jieba_logger.setLevel(logging.WARNING)


def main():
    """Run the climate-risk analysis pipeline end to end.

    Stages:
      1. Preparation: verify/create ``config.SENTENCE_JSON_DIR``.
         (Text preprocessing and step 1 / keyword expansion are currently
         disabled; the previous inline implementations were removed — see
         version-control history if they need to be restored.)
      2. Step 2: similar-sentence extraction over the chunked sentence JSONs.
      3. Step 3: API labeling — skipped when the DeepSeek API key is missing
         or a placeholder, or when the candidate-sentences file is absent/empty.
      4. Step 4: classifier training — skipped under the same kinds of
         preconditions as step 3.

    Returns:
        None. Aborts early (plain ``return``) on configuration errors or on a
        step-2 failure; steps 3 and 4 log errors but do not abort the run.
    """
    main_logger.info("开始执行气候风险分析项目...")
    start_time_total = time.time()

    # --- Preparation: sentence-chunk directory ---
    # SENTENCE_JSON_DIR must be defined (and non-empty) in config; abort early
    # otherwise, since every later step reads from it.
    if not hasattr(config, 'SENTENCE_JSON_DIR') or not config.SENTENCE_JSON_DIR:
        main_logger.error("错误: config.py 中未定义 SENTENCE_JSON_DIR。项目终止。")
        return

    os.makedirs(config.SENTENCE_JSON_DIR, exist_ok=True)

    # NOTE(review): text preprocessing (process_all_text_files) and step 1
    # (run_keyword_expansion_step) are currently disabled. This log line is
    # kept for output continuity, but no preprocessing work happens here —
    # the pipeline assumes SENTENCE_JSON_DIR is already populated.
    main_logger.info("开始文本预处理阶段：收集、处理所有报告文件并分块保存句子...")

    # --- Step 2: similar-sentence extraction ---
    main_logger.info("\n" + "=" * 20 + " 开始步骤2：相似句子查找 " + "=" * 20)

    start_time_step2 = time.time()
    try:
        # load_sentences reads the chunked sentence JSON files from
        # SENTENCE_JSON_DIR (produced by the currently disabled preprocessing).
        all_sentences = load_sentences(config.SENTENCE_JSON_DIR)
        run_sentence_extraction_step(all_sentences)
    except Exception as e:
        # Step 2 output is required by steps 3 and 4, so a failure here aborts.
        main_logger.error(f"步骤2执行失败: {e}", exc_info=True)
        main_logger.error("项目因步骤2错误而终止。")
        return
    end_time_step2 = time.time()
    main_logger.info(f"步骤2完成，耗时: {end_time_step2 - start_time_step2:.2f} 秒。")

    # --- Step 3: API labeling ---
    # Skipped when the API key is unset/placeholder, or when the candidate
    # file is missing or effectively empty (< 5 bytes).
    main_logger.info("\n" + "=" * 20 + " 开始步骤3：API打标签 " + "=" * 20)
    if not config.DEEPSEEK_API_KEY or config.DEEPSEEK_API_KEY == "YOUR_DEEPSEEK_API_KEY":
        main_logger.warning("DeepSeek API Key 未在 config.py 中配置或使用的是占位符。")
        main_logger.warning("步骤3 (API打标签) 和步骤4 (分类器训练) 将被跳过。")
    elif not os.path.exists(config.CANDIDATE_SENTENCES_FILE) or os.path.getsize(
            config.CANDIDATE_SENTENCES_FILE) < 5:
        main_logger.warning(f"候选句子文件 {config.CANDIDATE_SENTENCES_FILE} 不存在或为空。")
        main_logger.warning("步骤3 (API打标签) 和步骤4 (分类器训练) 将被跳过。")
    else:
        start_time_step3 = time.time()
        try:
            run_api_labeling_step()
        except Exception as e:
            # Non-fatal: later steps may still run, but their inputs may be
            # incomplete, hence the warning.
            main_logger.error(f"步骤3执行失败: {e}", exc_info=True)
            main_logger.warning("步骤3 API打标签遇到错误，后续步骤可能无法正常运行。")
        end_time_step3 = time.time()
        main_logger.info(f"步骤3完成，耗时: {end_time_step3 - start_time_step3:.2f} 秒。")

    # --- Step 4: classifier training ---
    # Same gating as step 3: requires a configured API key and a non-empty
    # API-labeled sentences file.
    main_logger.info("\n" + "=" * 20 + " 开始步骤4：分类器训练 " + "=" * 20)
    if not config.DEEPSEEK_API_KEY or config.DEEPSEEK_API_KEY == "YOUR_DEEPSEEK_API_KEY":
        main_logger.warning("因API Key未配置，跳过步骤4 (分类器训练)。")
    elif not os.path.exists(config.LABELED_SENTENCES_API_FILE) or os.path.getsize(
            config.LABELED_SENTENCES_API_FILE) < 5:
        main_logger.warning(f"API打标后的句子文件 {config.LABELED_SENTENCES_API_FILE} 不存在或为空。")
        main_logger.warning("跳过步骤4 (分类器训练)。")
    else:
        start_time_step4 = time.time()
        try:
            run_classifier_training_step()
        except Exception as e:
            main_logger.error(f"步骤4执行失败: {e}", exc_info=True)
        end_time_step4 = time.time()
        main_logger.info(f"步骤4完成，耗时: {end_time_step4 - start_time_step4:.2f} 秒。")

    end_time_total = time.time()
    main_logger.info(f"\n气候风险分析项目全部执行完毕。总耗时: {end_time_total - start_time_total:.2f} 秒。")
    main_logger.info("请检查 'outputs' 文件夹中的结果和日志。")


if __name__ == '__main__':
    # Example test-only settings to add to config.py (if not already present):
    # import os
    # BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # project root
    # OUTPUTS_DIR = os.path.join(BASE_DIR, "outputs")
    # DATA_DIR = os.path.join(BASE_DIR, "data")
    # ANNUAL_REPORTS_DIR = os.path.join(DATA_DIR, "annual_reports")
    # CSR_REPORTS_DIR = os.path.join(DATA_DIR, "csr_reports")
    # SENTENCE_JSON_DIR = os.path.join(OUTPUTS_DIR, "sentence_chunks")
    # MIN_SENTENCE_LENGTH = 5
    # FILES_PER_JSON_CHUNK = 2  # use a small value when testing
    # NUM_PROCESSES_TEXT_PREP = 2  # limit worker processes when testing
    # CANDIDATE_SENTENCES_FILE = os.path.join(OUTPUTS_DIR, "candidate_sentences_for_api.json")  # example
    # LABELED_SENTENCES_API_FILE = os.path.join(OUTPUTS_DIR, "labeled_sentences_from_api.json")  # example
    # DEEPSEEK_API_KEY = "YOUR_DEEPSEEK_API_KEY"  # example

    # To test main.py you may need to create sample data directories and files
    # (NOTE: fixed a typo in the snippet below — it previously read
    # config.CSR_REports_DIR, which would raise AttributeError if enabled):
    # if not os.path.exists(config.ANNUAL_REPORTS_DIR):
    #     os.makedirs(config.ANNUAL_REPORTS_DIR)
    #     with open(os.path.join(config.ANNUAL_REPORTS_DIR, "dummy_annual_report.txt"), "w") as f:
    #         f.write("This is a dummy annual report sentence. Another one.")
    # if not os.path.exists(config.CSR_REPORTS_DIR):
    #     os.makedirs(config.CSR_REPORTS_DIR)
    #     with open(os.path.join(config.CSR_REPORTS_DIR, "dummy_csr_report.txt"), "w") as f:
    #         f.write("This is a dummy CSR report sentence. And another one.")

    main()