import json
import asyncio
import time
from datetime import timezone
from django.utils import timezone
from asgiref.sync import sync_to_async
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
from django.db.models import QuerySet


import requests
from .redis_task import TaskRedisManager  # 导入工具类
import numpy as np
from urllib.parse import urlparse
from celery import shared_task, Task, current_task
import redis
from django.db import transaction
import logging
from celery import shared_task
# 模型导入
from job.models import Interview
from home.models import DeliverJobposting
from job.models import ResumeParseData
from user.models import JobInfo
from home.models import Jobposting
import jieba
import jieba.analyse
# 导入检索工具
from .es_retriever import ESRetriever
from .chroma_retriever import ChromaRetriever
import traceback

logger = logging.getLogger(__name__)

from job.utils.openai_llm import llm
from django.utils import timezone
from job.consumers import push_screening_result_to_hr, push_screening_result_to_user

# # 职位描述工具类
# from job.utils.redis_jobdesc import JobDescCache
# jobdesc_cache = JobDescCache()


# # Redis 配置（用于职位描述存储）
redis_client = redis.Redis(host='localhost', port=6379, db=1, decode_responses=True)



# 数据处理模块：提取简历和职位关键信息
class UserInfoProcessor:
    """Async helpers that fetch pre-structured resume data and (cached) structured
    job-description data for the screening pipeline."""

    async def extract_key_info(self, resume_id: int) -> dict:
        """
        Read already-structured resume information from the database
        (no re-parsing is performed here).

        :param resume_id: ID of the resume (JobInfo id).
        :return: dict with skills, work experience, education and the raw parsed text.
        :raises Exception: when no ResumeParseData row exists for this resume.
        """
        try:
            resume_data = await sync_to_async(ResumeParseData.objects.get)(jobinfo_id=resume_id)
            # Return the structured fields straight from the database row.
            return {
                "skills": resume_data.skills,
                "experience": resume_data.work_experience,
                "education": resume_data.education,
                "raw_text": resume_data.parse_text  # raw text kept for similarity computation
            }
        except ResumeParseData.DoesNotExist:
            logger.error("提取简历信息失败")
            raise Exception("提取简历信息失败")

    async def analyze_job_desc(self, job_id: int) -> dict:
        """
        Return the structured job-description info for ``job_id``.

        Looks up the Redis cache first; on a miss, analyses the job posting and
        caches the result. Retries the cache round-trip up to 3 times and
        returns {} if every attempt fails.
        """
        max_retries = 3
        retries = 0
        while retries < max_retries:
            cache_key = f"job_desc:{job_id}"
            cached_data = redis_client.get(cache_key)
            if cached_data:
                return json.loads(cached_data)
            # Cache miss: build the structured description.
            # BUGFIX: process_job_resumes is a synchronous ORM method — it must be
            # wrapped with sync_to_async rather than awaited directly (awaiting the
            # returned dict raised TypeError and the DB I/O blocked the event loop).
            resume_processor = ResumeProcessor()
            job = await sync_to_async(resume_processor.process_job_resumes)(job_id)
            structured_data = {
                "skills": job["skills"],
                "description": job["description"],
                "education": job["education"],
                # BUGFIX: join the field values, not the (key, value) tuples that
                # .items() yields.
                "raw_text": ",".join(str(value) for value in job.values())
            }
            # Store in Redis with a 1-hour TTL.
            redis_client.setex(cache_key, 3600, json.dumps(structured_data))
            if redis_client.get(cache_key):
                return structured_data
            retries += 1
            # BUGFIX: use the async sleep — time.sleep(1) would stall the event loop.
            await asyncio.sleep(1)
        return {}  # all retries failed

user_processor=UserInfoProcessor()



from job.utils.openai_llm import llm
"""核心简历处理器，协调各组件完成简历处理方法"""
class ResumeProcessor:
    """Core resume processor: coordinates parsing, persistence, hybrid retrieval
    (Elasticsearch + Chroma), LLM-based scoring and result push for the
    resume-screening pipeline."""

    def __init__(self):
        self.es_retriever = ESRetriever()
        self.chroma_retriever = ChromaRetriever()
        # Initialize jieba (Chinese tokenizer) eagerly so the first cut is not slow.
        jieba.initialize()

        self.llm_client = llm

    """ 获取指定职位的未处理简历"""
    def get_unprocessed_resumes(self, job_id):
        """Return the queryset of unprocessed resume deliveries for one job posting."""
        return DeliverJobposting.objects.filter(
            jobposting_id=job_id,
            resume_status=0  # 0 = unprocessed
        ).select_related('jobinfo')  # preload the applicant (JobInfo) in the same query


    """ 简历处理器，完成简历解析、提取内容等操作"""
    def extract_content(self, url):
        """Download the resume file at ``url`` and return its plain text.

        Supported types, decided by the URL's file extension: pdf, docx, txt.

        :raises Exception: on download/parsing failure or unsupported file type.
        """
        try:
            # File extension taken from the URL path, lower-cased.
            file_ext = urlparse(url).path.split('.')[-1].lower()
            response = requests.get(url, stream=True, timeout=10)  # HTTP request, streamed read
            response.raise_for_status()  # raise unless the status code is a success
            # Pick the extraction strategy from the file type.
            import io
            if file_ext == 'pdf':
                import pdfplumber
                # Open the raw response bytes as a PDF file object.
                with pdfplumber.open(io.BytesIO(response.content)) as pdf:
                    return '\n'.join(
                        [page.extract_text() or '' for page in pdf.pages])  # join text of all pages
            elif file_ext == 'docx':
                from docx import Document
                doc = Document(io.BytesIO(response.content))
                return '\n'.join([para.text for para in doc.paragraphs])  # join all paragraph texts
            elif file_ext == 'txt':
                return response.text  # plain text: return the body directly
            else:
                raise ValueError(f"不支持的文件类型: {file_ext}")
        except Exception as e:
            # Log details to ease troubleshooting.
            logger.error(f"提取内容失败: {str(e)}，URL: {url}")
            raise Exception(f"提取内容失败: {str(e)}")


    """ 用 jieba 提取结构化数据（替代spaCy）"""
    def extract_structured_data(self, text):
        """Extract name / education / skills / work experience from resume text
        using jieba plus regex heuristics (a lightweight spaCy replacement)."""
        # Name: rule-based — assume a 2-4 character Chinese name near the top.
        import re
        name_pattern = re.compile(r'[^\u4e00-\u9fa5]([\u4e00-\u9fa5]{2,4})[^\u4e00-\u9fa5]')
        name_match = name_pattern.search(text[:200])  # only look at the first 200 characters
        name = name_match.group(1) if name_match else ""

        # Education: the first matching degree keyword wins.
        education_keywords = ["初中以下", '初中', "中专", "本科", "硕士", "博士及以上", "大专", "高中"]
        education = [kw for kw in education_keywords if kw in text]
        education = education[0] if education else ""

        # Skills: TF-IDF keyword extraction.
        skills = jieba.analyse.extract_tags(text, topK=20, withWeight=False)
        # Filter out non-skill words (extend this set as needed).
        non_skill_words = {
            "公司", "职责", "工作", "项目", "负责", "时间", "经验", "描述",
            "岗位", "职位", "内容", "参与", "完成", "协助", "提升", "使用",
            "技术", "进行", "相关", "开发", "设计", "实现", "管理", "系统",
            "产品", "服务", "客户", "团队", "业务", "流程", "能力", "了解",
            "熟悉", "掌握", "精通", "具备", "拥有", "良好", "优秀", "出色"
        }
        skills = [skill for skill in skills if skill not in non_skill_words and len(skill) > 1]

        # Work experience: rule-based — keep sentences containing both a date and
        # a company/organisation hint.
        sentences = re.split(r'[。；！？\n]', text)   # split into sentences
        work_experience = []
        for sent in sentences:
            sent = sent.strip()
            if not sent:
                continue
            # Does the sentence carry time information?
            has_time = (
                    re.search(r'[0-9]{4}[年/-][0-9]{1,2}', sent) or  # e.g. 2019年5月 or 2019-05
                    re.search(r'[0-9]{4}\s*年', sent) or  # e.g. 2019年
                    re.search(r'至今|目前|现在', sent)  # "until now" style phrases
            )
            # Does it mention a company / organisation?
            has_company = '公司' in sent or '企业' in sent or '集团' in sent or '有限' in sent or '机构' in sent  or '团队' in sent
            if has_time and (has_company):
                work_experience.append(sent)
                # Stop once we have enough entries.
                if len(work_experience) >= 5:  # keep the first 5
                    break

        return {
            "name": name,
            "education": education,
            "skills": skills,
            "work_experience": work_experience
        }


    """ 结构化数据 保存到MySQL"""
    def save_to_mysql(self, job_id, resume_id, raw_text, structured_data):
        """Persist the parsed resume (raw text plus structured fields) to MySQL.

        :return: the created ResumeParseData row.
        :raises Exception: re-raised after logging on any persistence failure.
        """
        try:
            jobposting = Jobposting.objects.get(id=job_id)      # job posting
            jobinfo = JobInfo.objects.get(id=resume_id)
            return ResumeParseData.objects.create(
                jobsposting_id=jobposting.id,  # FK to the job posting
                jobinfo_id=jobinfo.id,  # FK to the applicant / resume
                parse_text=raw_text,  # parsed plain text
                skills=structured_data["skills"],  # skill list
                work_experience=structured_data["work_experience"],  # work-experience entries
                education=structured_data["education"],  # education level
            )
        except Exception as e:
            # Log details of MySQL persistence failures.
            logger.error(f"保存结构化数据到 MySQL 失败: {str(e)}，job_id: {job_id}，resume_id: {resume_id}")
            raise


    """ 提取职位信息 构建query_text"""
    def process_job_resumes(self, job_id: int):
        """Build the structured query-text parts for a job posting and cache them
        in Redis under ``job_desc:{job_id}`` with a 1-hour TTL.

        :return: dict with title/money/job_type/working_exp/education/label/skills/description.
        :raises Exception: re-raised after logging on any failure.
        """
        try:
            jobposting = Jobposting.objects.get(id=job_id)
            job_type_mapping = {0: "不限", 1: "兼职", 2: "全职"}
            job_type = job_type_mapping.get(jobposting.type, "不限")
            working_mapping = {
                0: "经验不限", 1: "在校生", 2: "应届生",
                3: "1-3年", 4: "3-5年", 5: "5年以上"
            }
            working_exp = working_mapping.get(jobposting.working, "经验不限")
            title = jobposting.title or ""
            money = jobposting.money or ""
            education = jobposting.education or ""
            label = jobposting.label or ""
            description = jobposting.description or ""

            # Skill keywords from the description: a trigger verb followed by a
            # plausible skill token (length > 2, not a number).
            trigger_verbs = {"使用", "熟悉", "掌握", "负责", "开发", "精通", "了解", "具备", "做", "处理", "学习"}
            words = jieba.lcut(description)
            skill_candidates = []
            for i, word in enumerate(words):
                if word in trigger_verbs and i + 1 < len(words):
                    next_word = words[i + 1]
                    if len(next_word) > 2 and not next_word.isdigit():
                        skill_candidates.append(next_word)
            # Add TF-IDF keywords as extra candidates.
            skill_candidates += jieba.analyse.extract_tags(description, topK=15)
            skills = list(set(skill_candidates))
            skills_text = " ".join(skills)

            query_text_parts = {
                "title": title,
                "money": money,
                "job_type": job_type,
                "working_exp": working_exp,
                "education": education,
                "label": label,
                "skills": skills_text,
                "description": description,
            }
            cache_key = f"job_desc:{job_id}"
            redis_client.setex(cache_key, 3600, json.dumps(query_text_parts))
            return query_text_parts
        except Exception as e:
            # Log details of job-info extraction failures.
            logger.error(f"提取职位信息失败: {str(e)}，job_id: {job_id}")
            raise


    """ 混合使用 Elasticsearch（ES）和 Chroma 进行检索，并融合结果 """
    def _normalize_chroma_distance(self, chroma_scores):
        """Convert Chroma distances to similarities in [0, 1] (higher is better)."""
        if not chroma_scores:
            return {}
        min_dist = min(chroma_scores.values())
        max_dist = max(chroma_scores.values())
        # Smaller distance -> higher similarity: map (max_dist - dist) onto 0..1.
        return {
            resume_id: (max_dist - dist) / (max_dist - min_dist + 1e-8)
            for resume_id, dist in chroma_scores.items()
        }

    def hybrid_search(self, jobposting_id, query_text, top_k=10, score_threshold=0.5):
        """Hybrid retrieval: fuse ES keyword scores (weight 0.6) with normalized
        Chroma vector similarities (weight 0.4), filter by ``score_threshold``
        and return the top ``top_k`` as [{"resume_id", "score"}, ...]."""
        # 1. ES retrieval (business IDs + match scores).
        es_scores = self.es_retriever.search(query_text, top_k * 2)
        logger.info(f"ES 检索结果: {es_scores}")  # diagnostic logging
        if not es_scores:
            logger.warning("ES 检索未返回任何结果")
            return []  # no ES hits: return an empty list

        # 2. Chroma retrieval (business IDs + normalized similarity).
        chroma_scores = self.chroma_retriever.search(jobposting_id, query_text, top_k * 2)
        logger.info(f"Chroma 检索结果: {chroma_scores}")  # diagnostic logging

        normalized_chroma = self._normalize_chroma_distance(chroma_scores)

        # 3. Merge the two result sets, keyed by business resume ID.
        all_scores = {}
        # Fold in the ES scores.
        for resume_id, score in es_scores.items():
            all_scores[resume_id] = all_scores.get(resume_id, 0) + score * 0.6  # ES weight 0.6 (stale note said 0.5→0.7 — confirm intended weight)
        for resume_id, score in normalized_chroma.items():
            all_scores[resume_id] = all_scores.get(resume_id, 0) + score * 0.4

        # 4. Filter by threshold, sort by fused score (descending), keep top_k.
        filtered = {rid: s for rid, s in all_scores.items() if s >= score_threshold}
        return sorted([{"resume_id": rid, "score": s} for rid, s in filtered.items()],
                        key=lambda x: x["score"], reverse=True)[:top_k]

    """ 更新简历处理状态 """
    def update_delivery_status(self, deliver, status):
        """Set ``resume_status`` on a delivery row and save it; log and re-raise on failure."""
        try:
            deliver.resume_status = status
            deliver.save()
        except Exception as e:
            logger.error(f"更新简历处理状态失败: {str(e)}，deliver_id: {deliver.id}，status: {status}")
            raise


    """ 自动评分卡以及标记关键匹配点/风险点 """

    def score_and_mark(self, job_id, resume_id):
        """
        Automatic score card: mark key match points and risk points, using the
        LLM to judge education, skill and work-experience fit, with graceful
        degradation whenever an LLM call fails.

        :param job_id: job posting ID
        :param resume_id: resume ID
        :return: dict with score, match_points and risk_points; on error a
                 degraded dict (score 0) is returned instead of raising.
        """
        try:
            # 1. Job information (validate the return type).
            job_info = self.process_job_resumes(job_id)
            if not isinstance(job_info, dict):
                raise ValueError(
                    f"process_job_resumes 返回类型错误，应为 dict，实际是 {type(job_info)}，返回内容：{job_info}")

            # 2. Resume information (validate existence and required attributes).
            resume_data_list: QuerySet = ResumeParseData.objects.filter(jobinfo_id=resume_id)
            if not resume_data_list.exists():
                raise Exception(f"未找到 jobinfo_id={resume_id} 的简历数据")
            resume_data = resume_data_list.first()
            # Validate key attributes; extend as the model grows.
            required_attributes = ['education', 'skills', 'work_experience']
            for attr in required_attributes:
                if not hasattr(resume_data, attr):
                    raise AttributeError(f"简历数据 {resume_data} 缺少必要属性 '{attr}'")

            score = 0
            match_points = []
            risk_points = []

            # -------------------- 1. Education match: ask the LLM --------------------
            job_education = job_info.get("education", "")
            candidate_education = getattr(resume_data, 'education', "")
            if job_education and candidate_education:
                education_prompt = f"""
                职位学历要求：{job_education}
                候选人学历：{candidate_education}
                请判断候选人学历是否满足职位要求，输出简洁结论（如“满足”“不满足”），并说明理由。
                注意：理解“大专及以上”“本科优先”等表述的含义，比如本科满足大专及以上的要求。
                """
                try:
                    education_resp = self.llm_client._call_qianwen(education_prompt)
                    # NOTE(review): "不满足" also contains the substring "满足", so a
                    # negative verdict still takes this branch — confirm intended.
                    if "满足" in education_resp:
                        score += 1
                        match_points.append(
                            f"候选人学历 {candidate_education} 满足岗位要求：{job_education}，依据：{education_resp}")
                    else:
                        risk_points.append(
                            f"候选人学历 {candidate_education} 不满足岗位要求：{job_education}，依据：{education_resp}")
                except Exception as e:
                    logger.error(f"调用 LLM 处理学历匹配失败: {str(e)}，job_id: {job_id}, resume_id: {resume_id}")
                    # Degrade: plain string comparison (adjust per business rules).
                    if candidate_education == job_education:
                        score += 1
                        match_points.append(f"候选人学历 {candidate_education} 与岗位要求匹配")
                    else:
                        risk_points.append(f"候选人学历 {candidate_education} 与岗位要求 {job_education} 不匹配")
            else:
                logger.warning(f"职位或简历学历信息缺失 - 职位：{job_education}, 简历：{candidate_education}")
                risk_points.append(f"职位或简历学历信息缺失，无法准确判断学历匹配情况")

            # -------------------- 2. Skill match: LLM cleans and compares skills --------------------
            job_skills_raw = job_info.get("skills", "")
            candidate_skills_raw = getattr(resume_data, 'skills', "")
            candidate_work_exp = getattr(resume_data, 'work_experience', "")
            skills_prompt = f"""
            职位技能要求：{job_skills_raw}
            候选人技能：{candidate_skills_raw}
            候选人工作经历：{candidate_work_exp}

            请完成以下任务：
            1. 从职位技能要求中提取真正有价值、明确的技能关键词，过滤掉表意模糊（如“项目”“适配” ）、无实际技能意义的词汇。
            2. 从候选人技能和工作经历中，提取与职位技能要求相关的有效技能，同样过滤无效词汇。
            3. 对比清理后的职位技能和候选人技能，分别列出匹配的技能和缺失的技能。
            请以 JSON 格式输出结果，示例：
            {{
                "cleaned_job_skills": ["Python", "Django"],
                "cleaned_candidate_skills": ["Python", "Flask"],
                "matched_skills": ["Python"],
                "missing_skills": ["Django"]
            }}
            """
            try:
                skills_resp = self.llm_client._call_qianwen(skills_prompt)
                if "（LLM 调用失败" in skills_resp:
                    raise Exception(skills_resp)
                # NOTE(review): assumes the LLM returns bare JSON — a markdown-fenced
                # reply would make json.loads raise and trigger the fallback below.
                skills_data = json.loads(skills_resp)
                # Apply the matched / missing skill results.
                if skills_data.get("matched_skills"):
                    score += len(skills_data["matched_skills"])
                    match_points.append(f"候选人具备岗位要求的技能: {', '.join(skills_data['matched_skills'])}")
                if skills_data.get("missing_skills"):
                    risk_points.append(f"候选人缺失岗位要求的技能: {', '.join(skills_data['missing_skills'])}")
                    # Check for work experience that covers the missing skills.
                    similar_experience_prompt = f"""
                    候选人工作经历：{candidate_work_exp}
                    缺失技能：{', '.join(skills_data['missing_skills'])}
                    请判断候选人是否有与缺失技能相关的工作经验，有则列出，无则回复“无”。
                    """
                    similar_exp_resp = self.llm_client._call_qianwen(similar_experience_prompt)
                    if similar_exp_resp.strip() != "无":
                        match_points.append(f"候选人有与缺失技能相关的工作经验: {similar_exp_resp}")
            except Exception as e:
                logger.error(f"调用 LLM 处理技能匹配失败: {str(e)}，job_id: {job_id}, resume_id: {resume_id}")
                # Degrade: plain set intersection / difference.
                job_skills = set(job_skills_raw.split())
                # NOTE(review): if `skills` is stored as a string this builds a set
                # of single characters; works as intended only for list-like skills
                # — confirm against the ResumeParseData field type.
                resume_skills = set(candidate_skills_raw)
                common_skills = job_skills.intersection(resume_skills)
                score += len(common_skills)
                if common_skills:
                    match_points.append(f"候选人具备岗位要求的技能: {', '.join(common_skills)}")
                missing_skills = job_skills - resume_skills
                if missing_skills:
                    risk_points.append(f"候选人缺失岗位要求的技能: {', '.join(missing_skills)}")
                    similar_experience = [exp for exp in candidate_work_exp if
                                          any(skill in exp for skill in missing_skills)]
                    if similar_experience:
                        match_points.append(f"候选人有与缺失技能相关的工作经验: {', '.join(similar_experience)}")


            # -------------------- 3. Work-experience match (LLM; tune further as needed) --------------------
            job_exp_require = job_info.get("working_exp", "经验不限")
            candidate_work_exp = getattr(resume_data, 'work_experience', "")
            exp_prompt = f"""
            职位经验要求：{job_exp_require}
            候选人工作经历：{candidate_work_exp}
            请判断候选人工作经验是否符合岗位要求，输出简洁结论和理由。
            """
            try:
                exp_resp = self.llm_client._call_qianwen(exp_prompt)
                # exp_resp = llm.call_llm(exp_prompt)
                # NOTE(review): "不符合" also contains "符合", so a negative verdict
                # would still add a point here — confirm intended.
                if "符合" in exp_resp:
                    score += 1
                    match_points.append(f"候选人工作经验符合岗位要求，依据：{exp_resp}")
                else:
                    risk_points.append(f"候选人工作经验可能不符合岗位要求，依据：{exp_resp}")
            except Exception as e:
                logger.error(f"调用 LLM 处理工作经验匹配失败: {str(e)}，job_id: {job_id}, resume_id: {resume_id}")
                # Degrade: simply check whether any work experience exists.
                if job_exp_require != "经验不限" and candidate_work_exp:
                    score += 1
                    match_points.append("候选人有工作经验，符合岗位要求（LLM 调用失败，降级判断）")
                elif job_exp_require != "经验不限" and not candidate_work_exp:
                    risk_points.append("候选人无工作经验，可能不符合岗位要求（LLM 调用失败，降级判断）")

            return {
                "resume_id": resume_id,
                "score": score,
                "match_points": match_points,
                "risk_points": risk_points
            }

        except AttributeError as e:
            logger.error(f"属性访问错误: {str(e)}，job_id: {job_id}, resume_id: {resume_id}")
            return {
                "resume_id": resume_id,
                "score": 0,
                "match_points": [],
                "risk_points": [f"自动评分失败：属性访问错误 - {str(e)}"]
            }
        except ValueError as e:
            logger.error(f"值错误: {str(e)}，job_id: {job_id}, resume_id: {resume_id}")
            return {
                "resume_id": resume_id,
                "score": 0,
                "match_points": [],
                "risk_points": [f"自动评分失败：值错误 - {str(e)}"]
            }
        except Exception as e:
            logger.error(f"自动评分和标记失败: {str(e)}，job_id: {job_id}，resume_id: {resume_id}")
            return {
                "resume_id": resume_id,
                "score": 0,
                "match_points": [],
                "risk_points": [f"自动评分失败：{str(e)}"]
            }


    """ 处理单个简历的完整流程 """

    def process_and_score_single_resume(self, job_id, resume_id):
        """End-to-end pipeline for one resume: fetch the pending delivery row,
        parse and structure the resume, persist it, sync to ES/Chroma, run
        hybrid retrieval, then (on a match) score it and persist the screening
        assessment.

        :return: a result dict; {"status": "failed", "reason": ...} on failure.
        """
        try:
            # 1. Look up the pending delivery record.
            deliver = DeliverJobposting.objects.select_related('jobinfo').get(
                jobposting_id=job_id,
                jobinfo_id=resume_id,
                resume_status=0
            )
            print(f"[DEBUG] 找到投递记录: {deliver.id}")

        except DeliverJobposting.DoesNotExist as e:
            logger.info(f"未找到待处理简历: job_id={job_id}, resume_id={resume_id}")
            return {"status": "failed", "reason": "未找到待处理简历"}
        except Exception as e:
            logger.error(f"查找投递记录异常: {e}")
            return {"status": "failed", "reason": str(e)}

        # 2. Process the resume content inside one transaction.
        try:
            with transaction.atomic():
                print("到达处理简历 处理中")
                self.update_delivery_status(deliver, 1)  # mark as "processing"

                # Extract the raw resume text from the applicant's resume URL.
                jobinfo = deliver.jobinfo
                resume_url = jobinfo.resume_url
                print("提取内容4")
                raw_text = self.extract_content(resume_url)
                print(f"[DEBUG] 提取到原始内容: {raw_text[:100]}")

                # Structure the text.
                structured_data = self.extract_structured_data(raw_text)
                print("结构化数据5")

                # Persist to MySQL.
                resume_parse = self.save_to_mysql(job_id, resume_id, raw_text, structured_data)
                resume_parse_id = resume_parse.id  # structured resume row ID
                print("保存到MySQL6")

                # Sync to ES and Chroma.
                self.es_retriever.sync_document(job_id, resume_id, structured_data)
                print("同步到ES7")
                self.es_retriever.refresh_index()
                self.chroma_retriever.sync_vector(
                    jobposting_id=job_id,
                    resume_id=resume_id,
                    text=raw_text  # raw text used for vectorization
                )
                print("同步到Chroma8")

                # Mark as processed successfully.
                self.update_delivery_status(deliver, 3)
                print("更新为处理成功9")

                # Build the job query text.
                query_text_parts = self.process_job_resumes(job_id)
                query_text = " ".join(str(value) for value in query_text_parts.values())
                print("获取职位关键信息10")

                # Retrieve and score.
                best_matches = self.hybrid_search(job_id, query_text)
                print(f"[DEBUG] 检索结果 best_matches: {best_matches}")

                # Diagnostic logging of the key IDs involved.
                logger.info(f"待匹配简历 ID（业务）: {resume_id}, 结构化 ID: {resume_parse_id}")
                logger.info(f"检索结果 best_matches 中的 ID: {[m['resume_id'] for m in best_matches]}")
                # Normalize IDs to strings to avoid implicit type mismatches.
                target_id = str(resume_id)
                best_matches = [
                    {"resume_id": str(m["resume_id"]), "score": m["score"]}
                    for m in best_matches
                ]

                # Re-match with consistent (string) ID types.
                match = next(
                    (m for m in best_matches if m["resume_id"] == target_id),
                    None
                )
                print(f"当前简历匹配结果: {match}")

                # Build the task result (kept complete so Celery stores it).
                result = {
                    "resume_id": resume_id,
                    "resume_parse_id": resume_parse_id,
                    "job_id": job_id,
                    "match_found": bool(match),
                    "score": match["score"] if match else None,
                    "status": "SUCCESS" if match else "NO_MATCH",
                    "message": "简历匹配度较高" if match else "简历匹配度较低"
                }

                # On a match: run the score card and persist the assessment.
                if match:
                    score_info = self.score_and_mark(job_id, resume_id)
                    self._save_screening_assessment(deliver, match, score_info)  # save assessment + push results
                    logger.info(f"已为简历{resume_id}保存初筛评估信息")
                    result["score_info"] = score_info  # add scoring details
                else:
                    logger.info(f"简历{resume_id}未达到面试匹配条件")
                    # AI screening status: 2 appears to mean "not passed" — confirm
                    # against the DeliverJobposting model definition.
                    deliver.inter_status = 2
                    deliver.save()

                # Return the full result (so Celery can capture it).
                return result

        except Exception as e:
            self.update_delivery_status(deliver, 0)  # reset to "unprocessed" on error
            logger.error(f"处理异常: {e}")
            return {"status": "failed", "reason": str(e)}



    """ 保存初筛评估信息 llm生成评价  进行结果推送 """
    def _save_screening_assessment(self, deliver, match, score_info):
        """Persist the screening assessment as an Interview row, generate LLM
        evaluations for HR and for the applicant, and push the results over
        channels when the match score reaches the 0.5 threshold."""
        if not match:
            logger.warning("未找到匹配结果，跳过 LLM 生成和推送")
            return
        try:
            # Gather the three parties' information.
            job_info = self.process_job_resumes(deliver.jobposting_id)  # job information
            # job_info = Jobposting.objects.get(id=deliver.jobposting_id)
            resume_data = ResumeParseData.objects.filter(jobinfo_id=deliver.jobinfo_id).order_by('-id').first()
            # Interview payload (persisted to the database below).
            interview_info = {
                "ai_match_score": match["score"],
                "ai_search_score": score_info["score"],
                "ai_match_points": score_info["match_points"],
                "ai_risk_points": score_info["risk_points"]
            }

            # Generate the evaluations with the LLM helpers.
            hr_evaluation = llm.render_for_hr(job_info, resume_data, interview_info)
            user_evaluation = llm.render_for_user(job_info, resume_data, interview_info)

            # Save the screening assessment directly on the Interview model.
            Interview.objects.create(
                deliverjob=deliver,     # delivery record
                ai_search_score=score_info["score"],
                ai_match_score=match["score"],
                ai_match_points=score_info["match_points"],
                ai_risk_points=score_info["risk_points"],
                user_evaluation=user_evaluation,
                hr_evaluation=hr_evaluation,
                end_at=timezone.now()  # screening end time
            )
            # AI screening status: 1 presumably means "passed" — confirm against model.
            deliver.inter_status = 1
            deliver.save()
            logger.info(f"初筛评估信息保存成功: deliver_id={deliver.id}")

            # Push the results when the score clears the 0.5 threshold.
            if match["score"] >= 0.5:
                # Push to HR and to the job seeker.
                push_screening_result_to_hr(
                    jobposting_id=deliver.jobposting_id,
                    deliverjob_id=deliver.id,
                    evaluation_text={
                        "hr_evaluation": hr_evaluation,
                        "score_info": score_info
                    }  # evaluation details
                )

                push_screening_result_to_user(
                    user_id=deliver.jobinfo.user_id,
                    evaluation_text={
                        "user_evaluation": user_evaluation,
                        "score_info": score_info
                    }  # evaluation details
                )
        except Exception as e:
            logger.error(f"保存初筛评估信息异常: {e}")
            raise



""" Celery任务基类 """
class ResumeProcessingTask(Task):
    """Celery base task for resume processing: bounded retries plus failure logging."""
    max_retries = 3  # maximum number of retries
    default_retry_delay = 5  # seconds to wait between retries

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """Log the exception (with traceback info) when the task ultimately fails."""
        # CONSISTENCY FIX: use the module-level named logger like the rest of
        # this file, instead of the root logger via logging.error.
        logger.error(f"简历处理失败: {exc}", exc_info=einfo)


""" 简历处理全流程任务 """
@shared_task(base=ResumeProcessingTask)
def process_resumes(job_id, resume_id=None):
    """Resume-processing entry task.

    With ``resume_id`` given, process that single resume and return its result;
    otherwise fan out one async sub-task per unprocessed delivery of the job.
    """
    try:
        print(f"Celery 任务启动: job_id={job_id}, resume_id={resume_id}")
        if resume_id:
            # Single-resume mode: run the pipeline and return the result so
            # Celery stores it with the task.
            return _process_single_resume(
                job_id, resume_id, celery_task_id=current_task.request.id
            )
        # Batch mode: dispatch one asynchronous task per pending resume.
        resume_ids = DeliverJobposting.objects.filter(
            jobposting_id=job_id, resume_status=0
        ).values_list('jobinfo_id', flat=True)
        print("resume_ids", resume_ids)
        for pending_id in resume_ids:
            process_resumes.delay(job_id, pending_id)
        return {"status": "SUCCESS", "message": f"已派发{len(resume_ids)}个简历处理任务"}

    except Exception as e:
        TaskRedisManager.update_task_status(
            resume_id=resume_id,
            job_id=job_id,
            status='FAILURE'
        )
        print(f"简历处理失败: {e}")
        logger.error(f"简历处理失败: {e}")
        raise

import uuid
def _process_single_resume(job_id, resume_id, celery_task_id=None):
    """Run the full pipeline for one resume and mirror the task state in Redis.

    Uses the real Celery task id when supplied, otherwise a synthetic unique id.
    """
    print("hello")
    # Prefer the real Celery task id; otherwise synthesize a unique one.
    task_id = celery_task_id if celery_task_id else f"{job_id}_{resume_id}_{uuid.uuid4().hex[:8]}"
    TaskRedisManager.save_task_meta(
        resume_id=resume_id,
        job_id=job_id,
        task_id=task_id,
        status='STARTED'
    )
    print("任务元信息")

    try:
        print("[DEBUG] 尝试初始化 ResumeProcessor")
        processor = ResumeProcessor()
        print("处理简历2")
        # Execute the pipeline, then record success and hand back the result.
        outcome = processor.process_and_score_single_resume(job_id, resume_id)
        TaskRedisManager.update_task_status(
            resume_id=resume_id,
            job_id=job_id,
            status='SUCCESS'
        )
        return outcome

    except Exception as e:
        print(f"[CRITICAL] 处理失败: {str(e)}")
        print(traceback.format_exc())
        TaskRedisManager.update_task_status(
            resume_id=resume_id,
            job_id=job_id,
            status='FAILURE'
        )
        return {"status": "FAILURE", "reason": str(e)}

