# main.py (最终完善版)

import json
import logging
import time
import os
from typing import Dict, Any

# --- 模块导入 ---
# 按流程顺序导入，代表了我们的处理流水线
from .utils.file_parser import FileParser
from .extractor.resume_parser import ResumeParser
from .extractor.jd_parser import JDParser
from .transformer.transformer_1 import transform_via_ai
from .matcher.embedding_matcher import EmbeddingMatcher
from .evaluator.generate_report import generate_report

# --- Logging configuration ---
# Include timestamp, level and message in every log line.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def process_screening(resume_path: str, jd_path: str, pass_threshold: float = 70.0) -> dict:
    """
    Run one complete resume-screening pass using the hybrid matching
    strategy plus AI-generated insights.

    Pipeline: extraction -> unification -> matching -> reporting.

    Args:
        resume_path (str): Path to the resume file (.pdf, .docx, .txt, .png, ...).
        jd_path (str): Path to the job-description file (same formats).
        pass_threshold (float): Pass/fail cut-off score in [0, 100].

    Returns:
        dict: The final front-end report JSON on success, or an
        ``{"error": ...}`` payload when any stage fails.
    """
    start_time = time.time()
    logging.info(f"开始处理新任务 -> 简历: '{resume_path}', 职位: '{jd_path}'")

    try:
        # --- Step 1: initial structuring (extraction) ---
        logging.info("步骤 1/4: 调用解析器提取并进行初步结构化...")

        # Each parser wraps FileParser internally, so multiple file formats
        # are handled transparently.
        resume_structured_data = ResumeParser().parse(resume_path)
        jd_structured_data = JDParser().parse(jd_path)

        # Robustness: abort early if either parser reported a failure.
        if "error" in resume_structured_data or "error" in jd_structured_data:
            logging.error("解析器在初步结构化时失败。流程中止。")
            return {"error": "Initial parsing failed", "details": {"resume": resume_structured_data, "jd": jd_structured_data}}

        # --- Step 2: unification (transformation) ---
        logging.info("步骤 2/4: 调用Transformer将数据规整为统一格式...")

        # Pydantic models inside transform_via_ai keep the output schema stable.
        unified_resume = transform_via_ai(resume_structured_data, role="resume")
        unified_jd = transform_via_ai(jd_structured_data, role="jd")

        # Robustness: abort if the AI unification step failed on either side.
        if "error" in unified_resume or "error" in unified_jd:
            logging.error("AI统一格式转换失败。流程中止。")
            return {"error": "AI unification failed", "details": {"resume": unified_resume, "jd": unified_jd}}

        logging.info("简历数据已规整。")
        logging.info(f"JD数据已规整，动态权重: {unified_jd.get('dimension_weights')}")

        # --- Step 3: hybrid matching (numeric / categorical / semantic) ---
        logging.info("步骤 3/4: 执行混合策略匹配 (数值/类别/语义)...")
        matcher = EmbeddingMatcher()
        matcher_result = matcher.match(unified_resume, unified_jd)

        # BUGFIX: .get() returns None when 'total_score' is missing, and
        # formatting None with ':.4f' raised a TypeError that aborted an
        # otherwise successful run. Guard before formatting.
        raw_score = matcher_result.get('total_score')
        if raw_score is not None:
            logging.info(f"匹配完成，原始总分: {raw_score:.4f}")
        else:
            logging.warning("匹配完成，但结果中缺少 total_score。")

        # --- Step 4: report generation ---
        logging.info("步骤 4/4: 生成最终评估报告...")

        # unified_jd is passed through so the report can include AI insights.
        final_report = generate_report(
            matcher_result=matcher_result,
            unified_resume=unified_resume,
            unified_jd=unified_jd,
            resume_url=resume_path,
            threshold_score=pass_threshold
        )
        logging.info("报告生成成功！")

    except FileNotFoundError as e:
        logging.error(f"输入文件未找到: {e}", exc_info=False)
        return {"error": "File not found.", "details": str(e)}
    except Exception as e:
        # Top-level boundary: log the full traceback and return a structured error.
        logging.error(f"处理流程中发生未知错误: {e}", exc_info=True)
        return {"error": "An unexpected error occurred during the process.", "exception": str(e)}

    end_time = time.time()
    processing_time = end_time - start_time
    logging.info(f"任务处理完成，总耗时: {processing_time:.2f} 秒。")

    # Attach the wall-clock processing time to successful reports only.
    if 'match_score' in final_report or 'final_score' in final_report:
        final_report['processing_time_seconds'] = round(processing_time, 2)

    return final_report

def process_resume_from_job_posting(resume_file_path: str, job_posting) -> dict:
    """
    Match a resume against a Django ``JobPosting`` model instance.

    The posting's fields are serialized into a temporary JSON job
    description, which is then fed through ``process_screening``.

    Args:
        resume_file_path (str): Path to the candidate's resume file.
        job_posting: Django ``JobPosting`` model instance.

    Returns:
        dict: The matching report produced by ``process_screening``.
    """
    import tempfile

    def _split_csv(raw):
        # Empty/None fields become empty lists instead of [''].
        return raw.split(',') if raw else []

    # Build the JD payload from the model's fields.
    jd_data = {
        "job_title": job_posting.title,
        "department": job_posting.department,
        "required_hard_skills": _split_csv(job_posting.requirements_skills),
        "required_soft_skills": _split_csv(job_posting.soft_skills),
        "experience_years": _extract_years_from_text(job_posting.working_experience),
        "education": job_posting.education,
        "language_requirements": [],
        "other_requirements": job_posting.description,
    }

    # Persist the JD to a temporary .json file so the main pipeline can parse it.
    with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False, encoding='utf-8') as tmp:
        json.dump(jd_data, tmp, ensure_ascii=False, indent=2)
        jd_temp_path = tmp.name

    try:
        # required_match_score is already stored as a percentage in the DB.
        return process_screening(
            resume_path=resume_file_path,
            jd_path=jd_temp_path,
            pass_threshold=job_posting.required_match_score,
        )
    finally:
        # Always remove the temporary file, even when matching fails.
        if os.path.exists(jd_temp_path):
            os.unlink(jd_temp_path)

def _extract_years_from_text(text: str) -> int:
    """从文本中提取工作年限"""
    if not text:
        return 0
    
    import re
    # 匹配常见的年限表达
    patterns = [
        r'(\d+)\s*年',
        r'(\d+)\s*年以上',
        r'(\d+)\s*年以下',
        r'(\d+)\s*年经验'
    ]
    
    for pattern in patterns:
        match = re.search(pattern, text)
        if match:
            return int(match.group(1))
    
    return 0

def main():
    """
    Entry point of the project: configure the inputs, run the screening
    pipeline once, and print the resulting JSON report.
    """
    # --- Configuration ---
    # Pass/fail cut-off (0-100). Hybrid matching yields more accurate
    # scores, so a slightly higher threshold is reasonable.
    PASS_THRESHOLD = 75.0

    # Input files to process; adjust these paths to test other documents.
    # Supported formats include .pdf, .docx, .txt, .png, .jpg and more.
    resume_path = "data/resumes/candidate_1.pdf"
    jd_path = "data/job_descriptions/software_engineer.txt"

    # --- Run the screening pipeline ---
    final_json_report = process_screening(
        resume_path=resume_path,
        jd_path=jd_path,
        pass_threshold=PASS_THRESHOLD,
    )

    # --- Emit the final result ---
    banner = "=" * 25
    print("\n" + banner + " 最终输出的JSON报告 " + banner)
    print(json.dumps(final_json_report, indent=4, ensure_ascii=False))
    print("=" * 75)

# Script entry point: run the demo screening flow when executed directly.
if __name__ == "__main__":
    main() 