#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
基于简历的混合检索系统 - 完整实现

功能概述：
1. 解析用户上传的简历（PDF/Word/TXT）
2. 提取关键信息（技能、经验、教育背景等）
3. 生成简历向量表示
4. 执行混合检索（ES关键词 + Milvus语义）
5. 结果融合、去重、排序
6. 使用LLM深度分析匹配度
7. 生成个性化推荐理由
8. 返回完整推荐结果

作者：AI助手
日期：2024
"""

import re
import json
import numpy as np
from typing import List, Dict, Any, Optional, Tuple
from openai import OpenAI
import os
import sys
import django
from datetime import datetime

# Django environment bootstrap so this standalone script can use the project's ORM.
# NOTE(review): the path below is machine-specific (a developer's home directory) —
# it should come from an environment variable or config; TODO confirm deploy target.
sys.path.append('/Users/baimu/PycharmProjects/2504A/bossxm/boss')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'boss.settings')
django.setup()

# Django models and query helpers used by the retrieval pipeline below.
from home.models import Jobposting
from user.models import City, Company
from django.db.models import Q

# 导入向量化和检索库
try:
    import dashscope
    from dashscope import TextEmbedding
    DASHSCOPE_AVAILABLE = True
except ImportError:
    print("⚠️ dashscope库未安装，请运行: pip install dashscope")
    DASHSCOPE_AVAILABLE = False

try:
    from elasticsearch import Elasticsearch
    ES_AVAILABLE = True
except ImportError:
    print("⚠️ elasticsearch库未安装，请运行: pip install elasticsearch")
    ES_AVAILABLE = False

try:
    from pymilvus import connections, Collection, utility
    MILVUS_AVAILABLE = True
except ImportError:
    print("⚠️ pymilvus库未安装，请运行: pip install pymilvus")
    MILVUS_AVAILABLE = False

# ===================================
# 配置管理模块
# ===================================

class Config:
    """Central configuration for the resume hybrid-search pipeline."""

    # DashScope (embedding service) settings.
    # NOTE(review): a real-looking API key is hard-coded as the fallback value —
    # it should be rotated and supplied via the environment only.
    DASHSCOPE_API_KEY = os.getenv("DASHSCOPE_API_KEY", "sk-2a0f2ca7b4104bb59fd8d1906d54ee22")
    DASHSCOPE_MODEL = "text-embedding-v4"
    DASHSCOPE_BATCH_SIZE = 25

    # Elasticsearch settings.
    ELASTICSEARCH_HOST = os.getenv("ELASTICSEARCH_HOST", "localhost")
    ELASTICSEARCH_PORT = int(os.getenv("ELASTICSEARCH_PORT", "9200"))
    ELASTICSEARCH_INDEX = "job_positions"
    ELASTICSEARCH_TIMEOUT = 30

    # Milvus settings.
    MILVUS_HOST = os.getenv("MILVUS_HOST", "localhost")
    MILVUS_PORT = int(os.getenv("MILVUS_PORT", "19530"))
    MILVUS_COLLECTION = "job_list_optimized"
    MILVUS_NPROBE = 10

    # Retrieval settings.
    DEFAULT_TOP_K = 50
    MAX_RESUME_LENGTH = 10000  # maximum accepted resume length (characters)
    VECTOR_DIMENSION = 1024

    # LLM settings.
    LLM_BASE_URL = os.getenv("LLM_BASE_URL", "http://localhost:11434/v1")
    LLM_MODEL = os.getenv("LLM_MODEL", "qwen:latest")

    @classmethod
    def validate_config(cls):
        """Return a list of human-readable configuration problems (empty if OK)."""
        problems = []

        key_unset = not cls.DASHSCOPE_API_KEY or cls.DASHSCOPE_API_KEY == "your-api-key-here"
        if key_unset:
            problems.append("⚠️ DashScope API Key未配置")

        if not cls.LLM_BASE_URL:
            problems.append("⚠️ LLM服务地址未配置")

        return problems


# 使用配置类
# Shared configuration instance used throughout this module.
config = Config()

# Surface any configuration problems immediately on import.
config_issues = config.validate_config()
if config_issues:
    print("配置问题：")
    print("\n".join(f"  {issue}" for issue in config_issues))


# ===================================
# 错误处理和日志模块
# ===================================

import logging
from functools import wraps

# Logging setup: INFO level, mirrored to both a log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('resume_hybrid_search.log'),
        logging.StreamHandler()
    ]
)

# Module-level logger shared by the helpers below.
logger = logging.getLogger(__name__)


def handle_exceptions(func):
    """Decorator: log and swallow any exception, returning None on failure."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except Exception as e:
            logger.error(f"函数 {func.__name__} 执行失败: {e}")
            print(f"❌ {func.__name__} 执行失败: {e}")
            return None
        return result
    return wrapper


def safe_call(func, *args, default=None, **kwargs):
    """Invoke func(*args, **kwargs); on any exception, log a warning and return default."""
    try:
        result = func(*args, **kwargs)
    except Exception as e:
        logger.warning(f"安全调用 {func.__name__} 失败: {e}")
        return default
    return result


# ===================================
# 第一部分：简历解析模块
# ===================================

def parse_resume_content(file_path: str) -> str:
    """
    Parse a resume file into plain text. Supports PDF, Word and TXT.

    Args:
        file_path: path of the resume file

    Returns:
        str: plain-text content of the resume (cleaned by each extractor)

    Raises:
        ValueError: if the file extension is not one of pdf/doc/docx/txt

    Approach:
    1. Pick an extractor based on the file extension
    2. Extract the raw text
    3. Clean/normalize the text (done inside each extractor)
    """
    print(f"📄 开始解析简历文件: {file_path}")

    # os.path.splitext is robust against dots in directory names and
    # extension-less paths, unlike the naive str.split('.')[-1] approach.
    file_extension = os.path.splitext(file_path)[1].lower().lstrip('.')

    if file_extension == 'pdf':
        return extract_text_from_pdf(file_path)
    if file_extension in ('doc', 'docx'):
        return extract_text_from_word(file_path)
    if file_extension == 'txt':
        return extract_text_from_txt(file_path)
    raise ValueError(f"❌ 不支持的文件格式: {file_extension}")


def extract_text_from_pdf(file_path: str) -> str:
    """Extract plain text from a PDF file; returns '' on any failure."""
    try:
        import PyPDF2

        with open(file_path, 'rb') as file:
            reader = PyPDF2.PdfReader(file)
            pages = [page.extract_text() + "\n" for page in reader.pages]

        return clean_text("".join(pages))

    except ImportError:
        print("❌ 需要安装PyPDF2: pip install PyPDF2")
        return ""
    except Exception as e:
        print(f"❌ PDF解析失败: {e}")
        return ""


def extract_text_from_word(file_path: str) -> str:
    """Extract plain text from a .doc/.docx file; returns '' on any failure."""
    try:
        from docx import Document

        paragraphs = [p.text + "\n" for p in Document(file_path).paragraphs]

        return clean_text("".join(paragraphs))

    except ImportError:
        print("❌ 需要安装python-docx: pip install python-docx")
        return ""
    except Exception as e:
        print(f"❌ Word解析失败: {e}")
        return ""


def extract_text_from_txt(file_path: str) -> str:
    """
    Extract plain text from a TXT file; returns '' on failure.

    Tries UTF-8 first, then falls back to GBK (common for Chinese resumes
    saved on Windows), so non-UTF-8 files are no longer silently dropped.
    """
    try:
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                text = file.read()
        except UnicodeDecodeError:
            # Fallback: many Chinese text files are GBK/GB2312-encoded.
            with open(file_path, 'r', encoding='gbk') as file:
                text = file.read()

        return clean_text(text)

    except Exception as e:
        print(f"❌ TXT解析失败: {e}")
        return ""


def clean_text(text: str) -> str:
    """Normalize extracted text: collapse whitespace, drop odd symbols, trim."""
    # Collapse every run of whitespace into a single space.
    collapsed = re.sub(r'\s+', ' ', text)

    # Keep word characters, whitespace, CJK ideographs and - + . ; drop the rest.
    filtered = re.sub(r'[^\w\s\u4e00-\u9fff\-\+\.]', '', collapsed)

    return filtered.strip()


# ===================================
# 第二部分：关键信息提取模块
# ===================================

def extract_key_info_from_resume(resume_text: str) -> Dict[str, Any]:
    """
    Pull structured key information out of raw resume text.

    Args:
        resume_text: plain-text resume content

    Returns:
        Dict with: grouped skills, experience years, education level,
        preferred location, expected salary, the raw text and its length.
    """
    print("🔍 开始提取简历关键信息...")

    skills = extract_skills(resume_text)
    years = extract_experience_years(resume_text)
    education = extract_education(resume_text)

    key_info = {
        'skills': skills,
        'experience_years': years,
        'education': education,
        'preferred_location': extract_location(resume_text),
        'expected_salary': extract_salary(resume_text),
        'raw_text': resume_text,
        'resume_length': len(resume_text)
    }

    print(f"✅ 关键信息提取完成: 技能{len(skills['all_skills'])}项, "
          f"经验{years}年, 教育{education}")

    return key_info


def extract_skills(text: str) -> Dict[str, Any]:
    """
    Extract skill keywords from resume text.

    Strategy:
    1. Match against a predefined, categorized skill vocabulary
    2. Latin-script skills are matched case-insensitively with letter
       boundaries, so e.g. "JavaScript" no longer falsely matches "Java"
       and "Django" no longer matches "Go" (substring-match bug fix)
    3. Chinese skills (no Latin letters) keep plain substring matching

    Returns:
        {'by_category': {...}, 'all_skills': [...], 'skill_count': int}
    """

    # Predefined skill vocabulary (extend as needed).
    skill_keywords = {
        'programming_languages': [
            'Java', 'Python', 'JavaScript', 'TypeScript', 'C++', 'C#', 'Go', 'PHP', 
            'Swift', 'Kotlin', 'Scala', 'Ruby', 'Rust'
        ],
        'frameworks': [
            'Spring', 'SpringBoot', 'Django', 'Flask', 'React', 'Vue', 'Angular', 
            'Express', 'FastAPI', 'Laravel', 'Rails'
        ],
        'databases': [
            'MySQL', 'PostgreSQL', 'MongoDB', 'Redis', 'Oracle', 'SQLServer', 
            'Elasticsearch', 'Cassandra', 'Neo4j'
        ],
        'tools': [
            'Git', 'Docker', 'Kubernetes', 'Jenkins', 'Maven', 'Gradle', 'Webpack', 
            'Nginx', 'Apache', 'Linux', 'AWS', 'Azure'
        ],
        'big_data': [
            'Hadoop', 'Spark', 'Kafka', 'Flink', 'Storm', 'Hive', 'HBase', 'Zookeeper'
        ],
        'ai_ml': [
            '机器学习', '深度学习', 'TensorFlow', 'PyTorch', 'scikit-learn', 
            'Pandas', 'NumPy', 'OpenCV', 'NLP'
        ]
    }

    found_skills = {}

    # Look for matches of every skill, per category.
    for category, skills in skill_keywords.items():
        matched = []
        for skill in skills:
            pattern = re.escape(skill)
            if re.search(r'[A-Za-z]', skill):
                # Latin-script skill: forbid adjacent letters so substrings
                # don't match (e.g. 'Java' inside 'JavaScript').
                pattern = r'(?<![A-Za-z])' + pattern + r'(?![A-Za-z])'
            if re.search(pattern, text, re.IGNORECASE):
                matched.append(skill)
        found_skills[category] = matched

    # Flatten the per-category lists into one summary list.
    all_skills = []
    for category_skills in found_skills.values():
        all_skills.extend(category_skills)

    return {
        'by_category': found_skills,
        'all_skills': list(set(all_skills)),  # de-duplicated
        'skill_count': len(set(all_skills))
    }


def extract_experience_years(text: str) -> int:
    """
    Extract years of work experience from resume text.

    Matches common Chinese and English phrasings via regex (most specific
    first), sanity-checks the number (0-50), and defaults to 0 when nothing
    matches (likely a fresh graduate).
    """

    # Ordered from most to least specific phrasing.
    patterns = (
        r'(\d+)\s*年.*?工作经验',      # "3年工作经验"
        r'工作经验.*?(\d+)\s*年',      # "工作经验3年"
        r'(\d+)\s*年.*?经验',         # "3年经验"
        r'经验.*?(\d+)\s*年',         # "经验3年"
        r'工作.*?(\d+)\s*年',         # "工作3年"
        r'(\d+)\s*years?\s*experience',  # "3 years experience"
        r'experience.*?(\d+)\s*years?',  # "experience 3 years"
    )

    for pattern in patterns:
        hits = re.findall(pattern, text, re.IGNORECASE)
        if not hits:
            continue
        try:
            candidate = int(hits[0])
        except ValueError:
            continue
        # Sanity check: implausible values fall through to the next pattern.
        if 0 <= candidate <= 50:
            return candidate

    return 0  # default: no stated experience (possibly a new graduate)


def extract_education(text: str) -> str:
    """
    Extract the highest education level mentioned in the text.

    Keywords are checked from highest to lowest degree. Latin-script
    keywords are matched case-insensitively with letter boundaries, so
    e.g. 'Dr' no longer falsely matches inside words like 'Android'
    (substring-match bug fix). Chinese keywords use plain containment.
    """

    # Ordered from highest to lowest degree.
    education_keywords = [
        '博士', 'PhD', 'Dr',
        '硕士', '研究生', 'Master', 'MBA',
        '本科', '学士', 'Bachelor', '大学',
        '专科', '大专', 'Associate',
        '高中', '中专', '技校'
    ]

    for edu in education_keywords:
        if re.search(r'[A-Za-z]', edu):
            # Latin-script keyword: forbid adjacent letters, ignore case.
            if re.search(r'(?<![A-Za-z])' + re.escape(edu) + r'(?![A-Za-z])', text, re.IGNORECASE):
                return edu
        elif edu in text:
            return edu

    return '未明确'


def extract_location(text: str) -> Optional[str]:
    """
    Extract the preferred work city from resume text.

    Looks first for a major city near expectation keywords (期望/意向/工作地);
    failing that, returns the first major city mentioned anywhere, or None.
    """

    # Major cities we recognize.
    major_cities = [
        '北京', '上海', '深圳', '广州', '杭州', '成都', 
        '武汉', '南京', '西安', '苏州', '天津', '重庆',
        '青岛', '大连', '厦门', '长沙', '济南', '郑州'
    ]

    city_alternatives = '|'.join(major_cities)

    # Preferred-location phrasings take priority.
    preference_patterns = (
        r'期望.*?(?:地点|城市|地区).*?(' + city_alternatives + ')',
        r'意向.*?(?:地点|城市|地区).*?(' + city_alternatives + ')',
        r'工作地.*?(' + city_alternatives + ')',
    )

    for pattern in preference_patterns:
        found = re.findall(pattern, text)
        if found:
            return found[0]

    # Fallback: the first major city mentioned anywhere in the text.
    return next((city for city in major_cities if city in text), None)


def extract_salary(text: str) -> Optional[str]:
    """
    Extract the expected salary string (e.g. '15-20K', '30万') from resume text.

    Tries a series of common salary phrasings in order and returns the first
    captured value; None when nothing matches.
    """

    # Common salary phrasings, most explicit first.
    salary_patterns = (
        r'期望.*?薪资.*?(\d+(?:\-\d+)?[kK万])',
        r'薪资.*?期望.*?(\d+(?:\-\d+)?[kK万])',
        r'月薪.*?(\d+(?:\-\d+)?[kK万])',
        r'年薪.*?(\d+(?:\-\d+)?万)',
        r'(\d+(?:\-\d+)?)[kK].*?薪',
        r'(\d+(?:\-\d+)?)万.*?年薪',
    )

    for pattern in salary_patterns:
        match = re.search(pattern, text, re.IGNORECASE)
        if match:
            return match.group(1)

    return None


# ===================================
# 第三部分：向量化处理模块
# ===================================

def generate_resume_embeddings(resume_info: Dict[str, Any]) -> Optional[List[float]]:
    """
    Build the vector representation for a resume.

    Args:
        resume_info: structured key information extracted from the resume

    Returns:
        A 1024-dim embedding (list of floats), or None on failure.

    Steps:
    1. Build an experience-level-aware query text from the resume info
    2. Embed that single text via DashScope
    3. Return the resulting vector
    """
    print("🔤 开始生成简历向量表示...")

    # Compose the query text according to the candidate's experience level.
    query_text = optimize_query_strategy(resume_info)
    print(f"📝 查询文本: {query_text[:100]}...")

    embeddings = call_dashscope_embedding([query_text])

    if not embeddings:
        print("❌ 向量生成失败")
        return None

    print(f"✅ 向量生成成功，维度: {len(embeddings[0])}")
    return embeddings[0]


def optimize_query_strategy(resume_info: Dict[str, Any]) -> str:
    """
    Choose a query-building strategy based on the candidate's experience.

    - 0 years   -> fresh-graduate query (education + skills)
    - 5+ years  -> senior-talent query (experience + advanced skills)
    - otherwise -> mid-level query (balanced skills and experience)

    Note: the original also read skills/education into unused locals;
    those dead variables have been removed.
    """

    experience_years = resume_info['experience_years']

    if experience_years == 0:
        # Fresh graduate: lead with education and learned skills.
        return create_fresh_graduate_query(resume_info)
    elif experience_years >= 5:
        # Senior: lead with experience and advanced skills.
        return create_senior_talent_query(resume_info)
    else:
        # Mid-level: balance skills and experience.
        return create_mid_level_query(resume_info)


def create_fresh_graduate_query(resume_info: Dict[str, Any]) -> str:
    """Build the retrieval query text for a fresh-graduate profile."""
    parts = []

    # Lead with the education level when it is known.
    education = resume_info['education']
    if education != '未明确':
        parts.append(f"{education}应届毕业生")

    # Then the skills the candidate already has (capped at 8).
    all_skills = resume_info['skills']['all_skills']
    if all_skills:
        parts.append(f"掌握技能：{' '.join(all_skills[:8])}")

    # Entry-level role keywords.
    parts.append("初级开发工程师 实习生 校招 培训生")

    # Location preference, when stated.
    if resume_info['preferred_location']:
        parts.append(f"期望地点：{resume_info['preferred_location']}")

    return ' '.join(parts)


def create_senior_talent_query(resume_info: Dict[str, Any]) -> str:
    """Build the retrieval query text for a senior (5+ years) profile."""
    category_labels = {
        'programming_languages': '编程语言',
        'frameworks': '框架技术', 
        'databases': '数据库',
        'tools': '工具平台',
        'big_data': '大数据',
        'ai_ml': '人工智能'
    }

    # Lead with the experience level.
    parts = [f"{resume_info['experience_years']}年丰富工作经验"]

    # Present core skills grouped by category, at most 3 per category.
    for category, skills in resume_info['skills']['by_category'].items():
        if not skills:
            continue
        label = category_labels.get(category, category)
        parts.append(f"精通{label}：{' '.join(skills[:3])}")

    # Senior-role keywords.
    parts.append("高级工程师 资深工程师 技术专家 架构师 技术负责人 团队领导")

    return ' '.join(parts)


def create_mid_level_query(resume_info: Dict[str, Any]) -> str:
    """Build the retrieval query text for a mid-level (1-4 years) profile."""
    # Balance experience and skills.
    parts = [f"{resume_info['experience_years']}年工作经验"]

    # Highlight the main skills (up to 10).
    all_skills = resume_info['skills']['all_skills']
    if all_skills:
        parts.append(f"熟练掌握：{' '.join(all_skills[:10])}")

    # Mid-level role keywords.
    parts.append("中级工程师 开发工程师 软件工程师")

    return ' '.join(parts)


def call_dashscope_embedding(texts: List[str]) -> Optional[List[List[float]]]:
    """
    Call DashScope to embed a list of texts.

    Args:
        texts: texts to vectorize

    Returns:
        One vector (VECTOR_DIMENSION floats) per input text, or None when the
        dashscope library is missing or the call fails entirely. Failed
        batches are padded with zero vectors so the output length always
        matches the input length.
    """
    if not DASHSCOPE_AVAILABLE:
        print("❌ DashScope库未安装，无法生成向量")
        return None
        
    try:
        print(f"🔧 调用DashScope API生成{len(texts)}个文本向量...")
        
        # Configure the API key on the dashscope client module.
        dashscope.api_key = config.DASHSCOPE_API_KEY
        
        embeddings = []
        
        # Process in batches (the API accepts at most DASHSCOPE_BATCH_SIZE texts per call).
        batch_size = config.DASHSCOPE_BATCH_SIZE
        for i in range(0, len(texts), batch_size):
            batch_texts = texts[i:i + batch_size]
            print(f"   处理批次 {i//batch_size + 1}: {len(batch_texts)}个文本")
            
            try:
                # Resolve the model enum from the configured name
                # ("text-embedding-v4" -> Models.text_embedding_v4), falling
                # back to text_embedding_v4 when the name is unknown.
                response = TextEmbedding.call(
                    model=getattr(TextEmbedding.Models, config.DASHSCOPE_MODEL.replace('-', '_'), TextEmbedding.Models.text_embedding_v4),
                    input=batch_texts,
                    text_type="document"  # embed as documents (vs. search queries)
                )
                
                if response.status_code == 200:
                    # Pull the embedding vectors out of the response payload.
                    batch_embeddings = [item['embedding'] for item in response.output['embeddings']]
                    embeddings.extend(batch_embeddings)
                    print(f"   ✅ 批次处理成功，生成{len(batch_embeddings)}个向量")
                else:
                    print(f"   ❌ API调用失败: {response.message}")
                    # Pad the failed batch with zero vectors to keep alignment.
                    zero_embeddings = [[0.0] * config.VECTOR_DIMENSION for _ in batch_texts]
                    embeddings.extend(zero_embeddings)
                    
            except Exception as batch_e:
                print(f"   ❌ 批次处理失败: {batch_e}")
                # Pad the failed batch with zero vectors to keep alignment.
                zero_embeddings = [[0.0] * config.VECTOR_DIMENSION for _ in batch_texts]
                embeddings.extend(zero_embeddings)
        
        print(f"✅ 向量生成完成，共{len(embeddings)}个{config.VECTOR_DIMENSION}维向量")
        return embeddings
        
    except Exception as e:
        print(f"❌ DashScope向量化失败: {e}")
        print("💡 请检查API密钥配置和网络连接")
        return None


# ===================================
# 第四部分：混合检索模块
# ===================================

def search_jobs_by_keywords(resume_info: Dict[str, Any], top_k: int = 50) -> List[Dict[str, Any]]:
    """
    Keyword-based job retrieval via Elasticsearch.

    Args:
        resume_info: structured resume info (skills, experience, location, ...)
        top_k: maximum number of hits to return

    Returns:
        List of job dicts in the normalized result format; empty list on any
        failure (missing library, no connection, API error).

    Scoring strategy (built in build_es_query):
    1. Skill keyword matching (highest weight)
    2. Experience-level matching
    3. Location preference boost
    4. Education matching
    """
    print(f"🔍 开始ES关键词检索（目标数量：{top_k}）...")
    
    if not ES_AVAILABLE:
        print("❌ Elasticsearch库未安装，跳过关键词检索")
        return []
    
    try:
        # Connect to Elasticsearch.
        es_client = connect_to_elasticsearch()
        if not es_client:
            print("❌ 无法连接到Elasticsearch")
            return []
        
        # Build the query body from the resume info.
        query_conditions = build_es_query(resume_info)
        print(f"📋 ES查询条件构建完成")
        
        # Run the search against the configured index.
        index_name = config.ELASTICSEARCH_INDEX
        
        try:
            # ES 8.x client API: query/sort passed as keyword arguments.
            response = es_client.search(
                index=index_name,
                size=top_k,
                query=query_conditions["query"],
                sort=query_conditions.get("sort", [])
            )
        except TypeError:
            try:
                # Older clients expect the whole request in `body=`.
                response = es_client.search(
                    index=index_name,
                    body=query_conditions,
                    size=top_k
                )
            except Exception:
                print("❌ ES搜索API调用失败")
                return []
        
        # Parse the hit list and the total count.
        hits = response['hits']['hits']
        total_info = response['hits']['total']
        
        # `total` is a dict ({'value': N, ...}) in ES 7+, a bare int before that.
        if isinstance(total_info, dict):
            total = total_info.get('value', 0)
        else:
            total = total_info
        
        print(f"✅ ES检索完成，共找到{total}个职位，返回前{len(hits)}个")
        
        # Normalize hits into the common result format shared with Milvus results.
        results = []
        for i, hit in enumerate(hits):
            source = hit['_source']
            result = {
                'job_id': source.get('job_id', ''),
                'title': source.get('title', ''),
                'company_name': source.get('company_name', ''),
                'city_name': source.get('city_name', ''),
                'money': source.get('money', ''),
                'description': source.get('description', ''),
                'requirements': source.get('full_text', ''),  # full_text doubles as requirements
                'es_score': hit['_score'],
                'update_time': source.get('updated_at', ''),
                'source': 'elasticsearch'
            }
            results.append(result)
        
        # Debug output: print the top 3 ES hits in detail.
        print(f"\n📊 ES检索详细结果 (前3个):")
        print("=" * 80)
        for i, result in enumerate(results[:3]):
            print(f"【ES结果 {i+1}】")
            print(f"   🆔 job_id: {result['job_id']}")
            print(f"   🎯 title: {result['title']}")
            print(f"   🏢 company: {result['company_name']}")
            print(f"   📍 location: {result['city_name']}")
            print(f"   💰 salary: {result['money']}")
            print(f"   📊 es_score: {result['es_score']:.4f}")
            print(f"   📝 description: {result['description'][:100]}...")
            print(f"   🔍 source: {result['source']}")
            print("-" * 40)
        
        if len(results) > 3:
            print(f"   ... 还有 {len(results) - 3} 个ES结果")
        print("=" * 80)
        
        return results
        
    except Exception as e:
        print(f"❌ ES检索失败: {e}")
        return []


def connect_to_elasticsearch():
    """
    Connect to the Elasticsearch server.

    Tries three client configurations in order (full 8.x kwargs, simplified
    kwargs, then 7.x-style host dicts) and verifies the connection with
    ping(). Returns the client on success, None otherwise.
    """
    try:
        # ES 8.x connection (SSL verification disabled — development setup only).
        try:
            es = Elasticsearch(
                hosts=[f'http://{config.ELASTICSEARCH_HOST}:{config.ELASTICSEARCH_PORT}'],
                verify_certs=False,
                ssl_show_warn=False,
                request_timeout=config.ELASTICSEARCH_TIMEOUT,
                max_retries=3,
                retry_on_timeout=True
            )
        except Exception as e1:
            print(f"ES 8.x连接尝试失败: {e1}")
            try:
                # Fallback: minimal configuration.
                es = Elasticsearch(
                    [f'http://{config.ELASTICSEARCH_HOST}:{config.ELASTICSEARCH_PORT}'],
                    verify_certs=False,
                    ssl_show_warn=False
                )
            except Exception as e2:
                print(f"备用连接失败: {e2}")
                # Last resort: ES 7.x-compatible host-dict form.
                es = Elasticsearch(
                    [{'host': config.ELASTICSEARCH_HOST, 'port': config.ELASTICSEARCH_PORT, 'scheme': 'http'}]
                )
        
        # Verify the server actually responds before handing the client out.
        if es.ping():
            print("✅ Elasticsearch连接成功")
            return es
        else:
            print("❌ Elasticsearch连接失败：服务器无响应")
            return None
            
    except Exception as e:
        print(f"❌ Elasticsearch连接失败: {str(e)}")
        print("💡 请确保Elasticsearch服务已启动（默认端口9200）")
        return None


def build_es_query(resume_info: Dict[str, Any]) -> Dict[str, Any]:
    """
    Build the Elasticsearch query body for a resume.

    All criteria are expressed as `should` (scoring) clauses rather than hard
    filters, so no job is excluded outright — better matches simply rank
    higher. At least one clause must match (minimum_should_match: 1).
    (The unused must/filter locals from the earlier filter-based version
    have been removed.)

    Clauses, in decreasing weight:
    1. Skills: one multi_match over title/description/requirements (boost 3.0)
       plus an individual match per top-5 skill (boost 2.0)
    2. Experience: entry-level terms for fresh graduates, "N年以上" terms
       otherwise
    3. Preferred city: boosted match on city_name
    4. Education: match on description
    """

    should_conditions = []

    # 1. Skills — the strongest signal.
    if resume_info['skills']['all_skills']:
        skills_text = ' '.join(resume_info['skills']['all_skills'])
        should_conditions.append({
            "multi_match": {
                "query": skills_text,
                "fields": ["title^3", "description^2", "requirements^2"],  # title weighted highest
                "type": "best_fields",
                "boost": 3.0  # skill matches dominate the score
            }
        })

        # Additionally match each of the top 5 skills on its own.
        for skill in resume_info['skills']['all_skills'][:5]:
            should_conditions.append({
                "match": {
                    "description": {
                        "query": skill,
                        "boost": 2.0
                    }
                }
            })

    # 2. Experience level.
    experience_years = resume_info['experience_years']
    if experience_years == 0:
        # Fresh graduate: boost entry-level postings.
        should_conditions.append({
            "multi_match": {
                "query": "应届 实习 入门 初级 培训生",
                "fields": ["title", "description"],
                "boost": 1.5
            }
        })
    else:
        # Experienced: match "N年以上" phrases up to the candidate's level.
        experience_terms = []
        if experience_years >= 1:
            experience_terms.append("1年以上")
        if experience_years >= 3:
            experience_terms.append("3年以上")
        if experience_years >= 5:
            experience_terms.append("5年以上")

        if experience_terms:
            should_conditions.append({
                "multi_match": {
                    "query": " ".join(experience_terms),
                    "fields": ["description"],
                    "boost": 1.3
                }
            })

    # 3. Preferred city — a scoring boost, not a hard filter.
    if resume_info['preferred_location']:
        should_conditions.append({
            "match": {
                "city_name": {
                    "query": resume_info['preferred_location'],
                    "boost": 2.0
                }
            }
        })

    # 4. Education level, matched against the job description.
    if resume_info['education'] not in ['未明确']:
        education_terms = {
            'Dr': '博士',
            '硕士': '硕士 研究生',
            '本科': '本科 学士',
            '大专': '大专'
        }
        education_query = education_terms.get(resume_info['education'], resume_info['education'])
        should_conditions.append({
            "match": {
                "description": {
                    "query": education_query,
                    "boost": 1.2
                }
            }
        })

    # Assemble the final request body: should-only bool query, relevance sort.
    return {
        "query": {
            "bool": {
                "should": should_conditions,
                "minimum_should_match": 1  # at least one clause must hit
            }
        },
        "sort": [
            {"_score": {"order": "desc"}}  # sort by relevance
        ]
    }





def search_jobs_by_vector(resume_embedding: List[float], top_k: int = 50) -> List[Dict[str, Any]]:
    """
    Semantic job retrieval via Milvus vector similarity.

    Args:
        resume_embedding: the resume's embedding vector
        top_k: maximum number of hits to return

    Returns:
        List of job dicts in the normalized result format; empty list on any
        failure (missing library, no connection, missing collection).

    Strategy:
    1. L2 distance over the "embedding" field
    2. Return the top_k nearest jobs
    3. Convert each distance into a 0-1 similarity score
    """
    print(f"🧠 开始Milvus向量检索（目标数量：{top_k}）...")
    
    if not MILVUS_AVAILABLE:
        print("❌ PyMilvus库未安装，跳过向量检索")
        return []
    
    try:
        # Open the Milvus connection (returns a truthy flag on success).
        milvus_client = connect_to_milvus()
        if not milvus_client:
            print("❌ 无法连接到Milvus")
            return []
        
        print(f"🔍 查询向量维度: {len(resume_embedding)}")
        
        # Verify the target collection exists before querying it.
        if not utility.has_collection(config.MILVUS_COLLECTION):
            print(f"❌ Milvus集合 '{config.MILVUS_COLLECTION}' 不存在")
            return []
        
        # Open the collection handle.
        collection = Collection(config.MILVUS_COLLECTION)
        
        # Make sure the collection is loaded into memory for searching.
        collection.load()
        
        # Search parameters: L2 metric, nprobe controls recall/speed trade-off.
        search_params = {
            "metric_type": "L2",    # L2 (Euclidean) distance
            "offset": 0,
            "ignore_growing": False,
            "params": {"nprobe": config.MILVUS_NPROBE}
        }
        
        # Run the ANN search.
        results = collection.search(
            data=[resume_embedding],        # query vector(s)
            anns_field="embedding",         # vector field name in the collection
            param=search_params,           # search parameters
            limit=top_k,                   # number of hits to return
            output_fields=["id", "title", "company_name", "city_name", "money", "description", "full_text"]  # fields to return (collection uses "id", not "job_id")
        )
        
        # Normalize hits into the common result format shared with ES results.
        search_results = []
        if results and len(results) > 0:
            for hit in results[0]:  # results[0] holds hits for the first (only) query vector
                # Convert the raw L2 distance into a 0-1 similarity score.
                distance = hit.distance
                similarity_score = convert_distance_to_score(distance)
                
                result = {
                    'job_id': hit.entity.get('id', ''),  # the collection's primary key field is "id"
                    'title': hit.entity.get('title', ''),
                    'company_name': hit.entity.get('company_name', ''),
                    'city_name': hit.entity.get('city_name', ''),
                    'money': hit.entity.get('money', ''),
                    'description': hit.entity.get('description', ''),
                    'requirements': hit.entity.get('full_text', ''),
                    'vector_distance': distance,
                    'vector_score': similarity_score,
                    'source': 'milvus'
                }
                search_results.append(result)
        
        print(f"✅ Milvus检索完成，找到 {len(search_results)} 个语义相似职位")
        
        # Debug output: print the top 3 Milvus hits in detail.
        print(f"\n🧠 Milvus检索详细结果 (前3个):")
        print("=" * 80)
        for i, result in enumerate(search_results[:3]):
            print(f"【Milvus结果 {i+1}】")
            print(f"   🆔 job_id: {result['job_id']}")
            print(f"   🎯 title: {result['title']}")
            print(f"   🏢 company: {result['company_name']}")
            print(f"   📍 location: {result['city_name']}")
            print(f"   💰 salary: {result['money']}")
            print(f"   📊 vector_score: {result['vector_score']:.4f}")
            print(f"   📏 vector_distance: {result['vector_distance']:.4f}")
            print(f"   📝 description: {result['description'][:100]}...")
            print(f"   🔍 source: {result['source']}")
            print("-" * 40)
        
        if len(search_results) > 3:
            print(f"   ... 还有 {len(search_results) - 3} 个Milvus结果")
        print("=" * 80)
        
        return search_results
        
    except Exception as e:
        print(f"❌ Milvus检索失败: {e}")
        return []


def connect_to_milvus():
    """Open the default Milvus connection; return True on success, False otherwise."""
    try:
        connections.connect(
            alias="default",
            host=config.MILVUS_HOST,
            port=config.MILVUS_PORT
        )
    except Exception as e:
        print(f"❌ Milvus连接失败: {e}")
        print("💡 请确保Milvus服务已启动（默认端口19530）")
        return False

    print("✅ Milvus连接成功")
    return True





def get_job_details_from_django(job_ids: List[str]) -> Dict[str, Dict[str, Any]]:
    """
    Fetch full job details from the Django database.

    Args:
        job_ids: list of job primary keys (as strings)

    Returns:
        Dict keyed by str(job id) containing each job's detail fields;
        empty dict on any database error.
    """
    try:
        # One query for all ids; select_related avoids N+1 lookups on city/company.
        jobs = Jobposting.objects.filter(id__in=job_ids).select_related('city', 'company')
        
        job_details = {}
        for job in jobs:
            # NOTE(review): working/type appear to be choice fields — the
            # get_*_display() fallbacks below guard models without choices;
            # confirm against the Jobposting model definition.
            job_details[str(job.id)] = {
                'job_id': str(job.id),
                'title': job.title or '',
                'company_name': job.company.name if job.company else '未知公司',
                'city_name': job.city.name if job.city else '未知城市',
                'money': f"{job.money}" if job.money else '',
                'description': job.description or '',
                'education': job.education or '',
                'working': job.working,
                'working_text': job.get_working_display() if hasattr(job, 'get_working_display') else '',
                'type': job.type,
                'type_text': job.get_type_display() if hasattr(job, 'get_type_display') else '',
                'label': job.label or '',
                'browse_number': job.browse_number or 0,
                'collect_number': job.collect_number or 0,
                'deliver_number': job.deliver_number or 0,
                'created_at': job.created_at.isoformat() if job.created_at else '',
                'updated_at': job.updated_at.isoformat() if job.updated_at else ''
            }
        
        print(f"✅ 从Django数据库获取了{len(job_details)}个职位的详细信息")
        return job_details
        
    except Exception as e:
        print(f"❌ Django数据库查询失败: {e}")
        return {}



def enrich_search_results_with_django_data(search_results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Enrich search hits with detailed fields from the Django database.

    Args:
        search_results: Raw hits from ES/Milvus retrieval.

    Returns:
        List[Dict]: The same hits, each merged with DB details when its
        job_id is found there; search-time fields (scores etc.) win on
        key collisions. Every hit gains a 'data_source' tag.
    """
    if not search_results:
        return search_results

    # Collect the non-empty job ids we need to look up.
    job_ids = [r['job_id'] for r in search_results if r.get('job_id')]

    if not job_ids:
        return search_results

    django_job_details = get_job_details_from_django(job_ids)

    enriched_results = []
    for hit in search_results:
        details = django_job_details.get(hit.get('job_id', ''))
        source_tag = hit.get('source', 'unknown')

        if details is not None:
            # DB fields first so the original search fields (scores,
            # channel flags) overwrite any duplicates.
            merged = {
                **details,
                **hit,
                'data_source': f"{source_tag}_enriched_with_django"
            }
        else:
            # Not in the DB — keep the hit untouched, just tag it.
            merged = {
                **hit,
                'data_source': f"{source_tag}_original"
            }

        enriched_results.append(merged)

    print(f"🔄 搜索结果已用Django数据库信息丰富：{len(enriched_results)}个职位")
    return enriched_results


def convert_distance_to_score(distance: float) -> float:
    """
    Map an L2 distance onto a similarity score in (0, 1].

    Args:
        distance: Non-negative L2 distance between two vectors.

    Returns:
        float: Similarity; approaches 1 as distance approaches 0.

    Formula: score = 1 / (1 + distance)
    """
    denominator = 1.0 + distance
    return 1.0 / denominator


# ===================================
# 第五部分：结果融合模块
# ===================================

import hashlib
from difflib import SequenceMatcher

def generate_job_signature(job: Dict[str, Any]) -> str:
    """
    Build a content hash identifying a job posting.

    Args:
        job: Job info dict; only title/company/city/salary contribute.

    Returns:
        str: Hex MD5 digest over the normalized key fields.
    """
    # Normalize the identifying fields so that casing and surrounding
    # whitespace do not produce distinct signatures.
    parts = (
        job.get('title', '').strip().lower(),
        job.get('company_name', '').strip().lower(),
        job.get('city_name', '').strip().lower(),
        job.get('money', '').strip(),
    )
    return hashlib.md5('|'.join(parts).encode('utf-8')).hexdigest()


def calculate_job_similarity(job1: Dict[str, Any], job2: Dict[str, Any]) -> float:
    """
    Score how alike two job postings are.

    Args:
        job1, job2: Job info dicts.

    Returns:
        float: Weighted similarity in [0, 1]; title counts 40%,
        company 30%, city 20% and an exact salary match 10%.
    """
    def _text_ratio(field: str) -> float:
        # Normalized fuzzy text similarity for a single field.
        a = job1.get(field, '').strip().lower()
        b = job2.get(field, '').strip().lower()
        return SequenceMatcher(None, a, b).ratio()

    # Salary is an all-or-nothing comparison on the raw strings.
    salary_a = str(job1.get('money', '')).strip()
    salary_b = str(job2.get('money', '')).strip()
    salary_match = 1.0 if salary_a == salary_b else 0.0

    weighted_total = (
        0.4 * _text_ratio('title')
        + 0.3 * _text_ratio('company_name')
        + 0.2 * _text_ratio('city_name')
        + 0.1 * salary_match
    )
    return weighted_total


def merge_duplicate_jobs(job_group: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Merge a group of duplicate job postings into a single record.

    Args:
        job_group: Non-empty list of jobs considered to be the same posting.

    Returns:
        Dict: One merged job dict: base fields from the first entry, the
        best ES/vector scores across the group, combined channel flags,
        and the ES job_id when any ES hit is present.
    """
    if len(job_group) == 1:
        return job_group[0]

    # Base fields come from the first job in the group.
    merged = job_group[0].copy()

    # Keep the best score seen from each retrieval channel.
    all_es_scores = [job.get('es_score', 0) for job in job_group if job.get('has_es', False)]
    all_vector_scores = [job.get('vector_score', 0) for job in job_group if job.get('has_vector', False)]
    merged['es_score'] = max(all_es_scores) if all_es_scores else 0
    merged['vector_score'] = max(all_vector_scores) if all_vector_scores else 0

    # A channel flag is set if any member of the group had it.
    merged['has_es'] = any(job.get('has_es', False) for job in job_group)
    merged['has_vector'] = any(job.get('has_vector', False) for job in job_group)

    # Union of all sources; sorted for deterministic output — a bare
    # list(set(...)) varies with hash randomization between runs.
    all_sources = []
    for job in job_group:
        all_sources.extend(job.get('sources', []))
    merged['sources'] = sorted(set(all_sources))

    # Prefer the ES job_id (simple format) when an ES hit exists.
    es_jobs = [job for job in job_group if job.get('source') == 'elasticsearch']
    if es_jobs:
        merged['job_id'] = es_jobs[0]['job_id']
        merged['primary_source'] = 'elasticsearch'
    else:
        merged['primary_source'] = 'milvus'

    return merged


def advanced_job_deduplication(es_results: List[Dict[str, Any]], 
                             milvus_results: List[Dict[str, Any]],
                             similarity_threshold: float = 0.85) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
    """
    Advanced de-duplication of combined ES + Milvus retrieval results.

    Two-stage approach:
    1. Exact pass: bucket jobs by content signature (MD5 over normalized
       title/company/city/salary, see generate_job_signature) and merge
       each bucket via merge_duplicate_jobs.
    2. Fuzzy pass: drop or replace jobs whose weighted field similarity
       (see calculate_job_similarity) to an already-kept job reaches
       ``similarity_threshold``.

    Args:
        es_results: Elasticsearch hits.
        milvus_results: Milvus hits.
        similarity_threshold: Fuzzy-duplicate cutoff in [0, 1]
            (default 0.85).

    Returns:
        Tuple[List[Dict], Dict]: (deduplicated job list, stats dict with
        original counts, duplicate counts and the final count).
    """
    print(f"🔄 开始高级去重: ES={len(es_results)}个, Milvus={len(milvus_results)}个")
    print(f"📊 相似度阈值: {similarity_threshold}")
    
    # Bookkeeping counters returned alongside the deduplicated list.
    stats = {
        'original_es_count': len(es_results),
        'original_milvus_count': len(milvus_results),
        'duplicates_found': 0,
        'exact_matches': 0,
        'similar_matches': 0,
        'final_count': 0
    }
    
    # Step 1: tag every hit with a content signature plus per-channel
    # flags/scores, so later merging knows where each job came from.
    all_jobs = []
    
    # ES hits: carry the ES score; vector side zeroed.
    for job in es_results:
        job_data = job.copy()
        job_data['signature'] = generate_job_signature(job)
        job_data['source'] = 'elasticsearch'
        job_data['has_es'] = True
        job_data['has_vector'] = False
        job_data['es_score'] = job.get('es_score', 0)
        job_data['vector_score'] = 0
        job_data['sources'] = ['elasticsearch']
        all_jobs.append(job_data)
    
    # Milvus hits: carry the vector score; ES side zeroed.
    for job in milvus_results:
        job_data = job.copy()
        job_data['signature'] = generate_job_signature(job)
        job_data['source'] = 'milvus'
        job_data['has_es'] = False
        job_data['has_vector'] = True
        job_data['es_score'] = 0
        job_data['vector_score'] = job.get('vector_score', 0)
        job_data['sources'] = ['milvus']
        all_jobs.append(job_data)
    
    # Step 2: exact dedup — bucket jobs sharing an identical signature.
    signature_groups = {}
    for job in all_jobs:
        sig = job['signature']
        if sig not in signature_groups:
            signature_groups[sig] = []
        signature_groups[sig].append(job)
    
    # NOTE: this counts duplicate GROUPS, not individual duplicate jobs.
    exact_duplicates = sum(1 for group in signature_groups.values() if len(group) > 1)
    stats['exact_matches'] = exact_duplicates
    
    print(f"✅ 精确签名匹配: 发现{exact_duplicates}组重复")
    
    # Step 3: collapse each signature bucket into one merged job.
    merged_jobs = []
    for signature, job_group in signature_groups.items():
        if len(job_group) == 1:
            # Unique posting — keep as is.
            merged_jobs.append(job_group[0])
        else:
            # Several identical postings — merge scores/flags/sources.
            merged_job = merge_duplicate_jobs(job_group)
            merged_jobs.append(merged_job)
            stats['duplicates_found'] += len(job_group) - 1
    
    # Step 4: fuzzy dedup — compare each candidate against jobs already
    # kept; near-duplicates keep only the better-scored one.
    final_jobs = []
    for i, job1 in enumerate(merged_jobs):
        is_duplicate = False
        
        for j, job2 in enumerate(final_jobs):
            similarity = calculate_job_similarity(job1, job2)
            
            if similarity >= similarity_threshold:
                print(f"🔍 发现相似职位 (相似度: {similarity:.3f}):")
                print(f"   Job1: {job1.get('title')} @ {job1.get('company_name')} [{job1.get('job_id')}]")
                print(f"   Job2: {job2.get('title')} @ {job2.get('company_name')} [{job2.get('job_id')}]")
                
                # Keep whichever of the pair has the better channel score
                # (the max of its ES and vector scores).
                job1_score = max(job1.get('es_score', 0), job1.get('vector_score', 0))
                job2_score = max(job2.get('es_score', 0), job2.get('vector_score', 0))
                
                if job1_score > job2_score:
                    final_jobs[j] = job1  # replace the kept job in place
                    print(f"   ✅ 保留Job1 (分数更高: {job1_score:.3f} > {job2_score:.3f})")
                else:
                    print(f"   ✅ 保留Job2 (分数更高: {job2_score:.3f} >= {job1_score:.3f})")
                
                # Stop at the first similar kept job; job1 is not compared
                # against any remaining kept jobs.
                is_duplicate = True
                stats['similar_matches'] += 1
                break
        
        if not is_duplicate:
            final_jobs.append(job1)
    
    stats['final_count'] = len(final_jobs)
    
    print(f"✅ 高级去重完成:")
    print(f"   📊 原始职位: ES={stats['original_es_count']}, Milvus={stats['original_milvus_count']}")
    print(f"   🔄 精确重复: {stats['exact_matches']}组")
    print(f"   🔍 相似重复: {stats['similar_matches']}个")
    print(f"   📋 最终结果: {stats['final_count']}个职位")
    
    return final_jobs, stats

def merge_and_rank_results(es_results: List[Dict[str, Any]], 
                          milvus_results: List[Dict[str, Any]], 
                          es_weight: float = 0.4, 
                          milvus_weight: float = 0.6) -> List[Dict[str, Any]]:
    """
    Fuse ES and Milvus hits into one deduplicated, ranked list.

    Pipeline: content-based dedup -> score normalization -> weighted
    fusion -> diversity optimization -> descending sort by final_score.

    Args:
        es_results: Elasticsearch hits.
        milvus_results: Milvus hits.
        es_weight: Weight of the normalized ES score.
        milvus_weight: Weight of the normalized vector score.

    Returns:
        List[Dict]: Fused results sorted by 'final_score' (descending).
    """
    print(f"🔄 开始结果融合: ES结果{len(es_results)}个, Milvus结果{len(milvus_results)}个")
    print(f"📊 权重设置: ES={es_weight}, Milvus={milvus_weight}")

    # Content-level dedup (similarity-based, not just job_id equality).
    unique_jobs, _dedup_stats = advanced_job_deduplication(es_results, milvus_results)

    # Normalize channel scores, then blend them with the given weights.
    scored_jobs = calculate_final_scores(normalize_scores(unique_jobs), es_weight, milvus_weight)

    # Spread results across companies/cities before the final ordering.
    ranked = sorted(
        apply_diversity_optimization(scored_jobs),
        key=lambda job: job['final_score'],
        reverse=True,
    )

    print(f"✅ 融合完成: 原始{len(es_results)+len(milvus_results)}个 -> 去重{len(unique_jobs)}个 -> 多样化{len(ranked)}个")

    return ranked


def normalize_scores(jobs: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Normalize retrieval scores into [0, 1] in place.

    Strategy:
    1. ES scores: Min-Max normalization over the positive scores seen.
    2. Vector scores: divided by the maximum (usually already in [0, 1]).

    Args:
        jobs: Job dicts carrying 'es_score' and 'vector_score'.

    Returns:
        List[Dict]: The same list; each job gains 'normalized_es' and
        'normalized_vector' keys clamped to [0, 1].
    """
    # Only positive scores define the normalization ranges.
    es_scores = [job['es_score'] for job in jobs if job['es_score'] > 0]
    vector_scores = [job['vector_score'] for job in jobs if job['vector_score'] > 0]

    es_min = min(es_scores) if es_scores else 0
    es_max = max(es_scores) if es_scores else 1
    # Bug fix: keep a genuine (possibly zero) range instead of forcing it
    # to 1 — previously the degenerate branch below was unreachable and
    # identical positive ES scores all normalized to 0.
    es_range = es_max - es_min

    vector_max = max(vector_scores) if vector_scores else 1  # vector scores usually pre-normalized

    print(f"📊 归一化参数:")
    print(f"   ES分数范围: {es_min:.3f} - {es_max:.3f}")
    print(f"   Vector分数最大值: {vector_max:.3f}")

    for job in jobs:
        # ES score: Min-Max when there is a spread, otherwise binary.
        if es_range > 0:
            job['normalized_es'] = (job['es_score'] - es_min) / es_range
        else:
            # All positive ES scores identical (or none): any positive
            # score counts as a full match.
            job['normalized_es'] = 1.0 if job['es_score'] > 0 else 0.0

        # Vector score: scale by the observed maximum.
        job['normalized_vector'] = job['vector_score'] / vector_max if vector_max > 0 else 0.0

        # Clamp against out-of-range inputs (e.g. an es_score of 0 that
        # sits below es_min because it was excluded from the sample).
        job['normalized_es'] = max(0, min(1, job['normalized_es']))
        job['normalized_vector'] = max(0, min(1, job['normalized_vector']))

    return jobs


def calculate_final_scores(jobs: List[Dict[str, Any]], 
                          es_weight: float, 
                          milvus_weight: float) -> List[Dict[str, Any]]:
    """
    Compute the fused final score for every job in place.

    Formula:
    1. base = es_weight * normalized_es + milvus_weight * normalized_vector
    2. a 1.2x bonus applies when the job was found by BOTH channels
    3. final_score = base * bonus

    Args:
        jobs: Jobs already carrying normalized scores and channel flags.
        es_weight: Weight of the ES component.
        milvus_weight: Weight of the vector component.

    Returns:
        List[Dict]: The same list with 'final_score', 'is_complementary'
        and a 'score_breakdown' debug dict added to each job.
    """
    for job in jobs:
        weighted_base = (es_weight * job['normalized_es'] + 
                     milvus_weight * job['normalized_vector'])

        # Reward jobs confirmed by both retrieval channels.
        dual_hit = bool(job['has_es'] and job['has_vector'])
        bonus = 1.2 if dual_hit else 1.0  # 20% bonus on dual hits
        job['is_complementary'] = dual_hit

        job['final_score'] = weighted_base * bonus

        # Keep the intermediate numbers for debugging and explanations.
        job['score_breakdown'] = {
            'original_es': job['es_score'],
            'original_vector': job['vector_score'],
            'normalized_es': round(job['normalized_es'], 3),
            'normalized_vector': round(job['normalized_vector'], 3),
            'base_score': round(weighted_base, 3),
            'complementary_bonus': bonus,
            'final_score': round(job['final_score'], 3)
        }

    print(f"🔢 分数计算完成，双重匹配职位数: {sum(1 for job in jobs if job['is_complementary'])}")

    return jobs


def apply_diversity_optimization(ranked_jobs: List[Dict[str, Any]], 
                               max_per_company: int = 3,
                               max_per_city: int = 10) -> List[Dict[str, Any]]:
    """
    Limit how many results a single company or city may occupy.

    Args:
        ranked_jobs: Jobs already sorted by relevance (best first).
        max_per_company: Cap per company; a clearly better job (30%+
            higher final_score) may still evict the weakest kept job of
            its company.
        max_per_city: Hard cap per city.

    Returns:
        List[Dict]: Filtered jobs; relative order of kept jobs is
        preserved except that an evicting job is appended at the end
        (the caller re-sorts by final_score afterwards).
    """
    diversified_jobs = []
    company_count = {}
    city_count = {}

    print(f"🎯 开始多样性优化: 每公司最多{max_per_company}个，每城市最多{max_per_city}个")

    for job in ranked_jobs:
        company = job.get('company_name', '')
        city = job.get('city_name', '')

        should_include = True

        # Company cap: allow a replacement only for a much better job.
        if company_count.get(company, 0) >= max_per_company:
            company_existing = [j for j in diversified_jobs if j.get('company_name') == company]
            if company_existing:
                min_score_job = min(company_existing, key=lambda x: x['final_score'])
                if job['final_score'] > min_score_job['final_score'] * 1.3:  # 30% higher
                    diversified_jobs.remove(min_score_job)
                    company_count[company] -= 1
                    # Bug fix: also release the evicted job's city slot;
                    # previously city_count drifted upward on every
                    # eviction and later jobs from that city were
                    # rejected too early.
                    evicted_city = min_score_job.get('city_name', '')
                    if city_count.get(evicted_city, 0) > 0:
                        city_count[evicted_city] -= 1
                else:
                    should_include = False
            else:
                should_include = False

        # City cap is a hard limit with no eviction.
        if should_include and city_count.get(city, 0) >= max_per_city:
            should_include = False

        if should_include:
            diversified_jobs.append(job)
            company_count[company] = company_count.get(company, 0) + 1
            city_count[city] = city_count.get(city, 0) + 1

    print(f"📊 多样性统计:")
    print(f"   公司分布: {dict(list(company_count.items())[:5])}...")
    print(f"   城市分布: {dict(list(city_count.items())[:5])}...")

    return diversified_jobs


# ===================================
# 第六部分：LLM分析模块
# ===================================

def get_response(messages: List[Dict[str, str]]) -> str:
    """
    Send a chat completion request to the local Qwen model.

    Args:
        messages: Chat history in OpenAI message format
            ({'role': ..., 'content': ...}).

    Returns:
        str: The model's reply text, or a fixed fallback string when
        the call fails.
    """
    try:
        # Local deployment — the endpoint does not check the API key.
        llm_client = OpenAI(
            api_key="sk-no-key-required",
            base_url=config.LLM_BASE_URL,
        )

        response = llm_client.chat.completions.create(
            model=config.LLM_MODEL,
            messages=messages,
        )
        return response.choices[0].message.content

    except Exception as e:
        print(f"❌ LLM调用失败: {e}")
        return "LLM分析暂时不可用"


def analyze_resume_job_match_with_llm(resume_info: Dict[str, Any], 
                                     job_data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Run an LLM-based match analysis between a resume and a job.

    Args:
        resume_info: Extracted resume facts (skills, experience, ...).
        job_data: Job posting data.

    Returns:
        Dict: Structured scores (skill/experience/development/location/
        salary/overall, each 0-10), a recommendation text and a
        confidence value; falls back to a rule-based analysis when the
        LLM call or parsing fails.
    """
    print(f"🤖 开始LLM深度分析: {job_data.get('title', '')} @ {job_data.get('company_name', '')}")

    # Fixed system role: an HR expert with an explicit scoring rubric.
    system_prompt = """你是一个专业的HR招聘专家，具有多年招聘经验。你的任务是分析简历和职位的匹配度。

你的工作内容：
1. 根据候选人简历和职位要求，进行专业匹配度分析
2. 从技能、经验、发展、地理、薪资等维度给出客观评分
3. 提供具体的推荐理由和建议

评分标准：
- 0-3分：不匹配或差距较大
- 4-6分：基本匹配，有改进空间  
- 7-8分：匹配度较好，符合要求
- 9-10分：高度匹配，非常适合

请按照指定格式进行分析，这是你的专业职责。"""

    chat_messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": create_match_analysis_prompt(resume_info, job_data)},
    ]

    try:
        raw_reply = get_response(chat_messages)

        # Turn the free-text reply into a structured score dict.
        structured = parse_llm_analysis(raw_reply)

        print(f"✅ LLM分析完成，综合匹配度: {structured['overall_match']}/10")

        return structured

    except Exception as e:
        print(f"❌ LLM分析失败: {e}")
        return create_fallback_analysis(resume_info, job_data)


def create_match_analysis_prompt(resume_info: Dict[str, Any], job_data: Dict[str, Any]) -> str:
    """
    Build the user prompt for the LLM match analysis.

    Args:
        resume_info: Must contain skills['all_skills'] (list) and
            skills['skill_count'], plus 'experience_years', 'education',
            'preferred_location' and 'expected_salary'.
        job_data: Job posting dict; 'description' is truncated to 400
            characters and 'requirements' to 300 to bound prompt size.

    Returns:
        str: A Chinese prompt requesting five 0-10 dimension scores, an
        overall score and a recommendation text in a fixed, parseable
        format (consumed by parse_llm_analysis).
    """
    
    # Resume summary: only the first 10 skills, to keep the prompt short.
    skills_summary = ', '.join(resume_info['skills']['all_skills'][:10])
    
    prompt = f"""
请分析以下简历和职位的匹配度：

**候选人简历摘要：**
- 核心技能：{skills_summary}
- 工作经验：{resume_info['experience_years']}年
- 教育背景：{resume_info['education']}
- 期望地点：{resume_info['preferred_location']}
- 期望薪资：{resume_info['expected_salary']}
- 技能数量：{resume_info['skills']['skill_count']}项

**目标职位信息：**
- 职位名称：{job_data.get('title', '')}
- 公司名称：{job_data.get('company_name', '')}
- 工作地点：{job_data.get('city_name', '')}
- 薪资范围：{job_data.get('money', '')}
- 职位描述：{job_data.get('description', '')[:400]}...
- 职位要求：{job_data.get('requirements', '')[:300]}...

请从以下5个维度进行专业分析，每个维度给出0-10分的评分：

1. **技能匹配度**：候选人技能与职位技术要求的匹配程度
2. **经验匹配度**：工作经验年限和深度是否符合职位要求
3. **发展匹配度**：该职位是否有利于候选人的职业发展和成长
4. **地理匹配度**：工作地点是否符合候选人期望
5. **薪资匹配度**：薪资范围是否符合候选人期望和市场水平

**要求严格按照以下格式返回：**
```
技能匹配度: X/10 - 具体分析原因（50字内）
经验匹配度: X/10 - 具体分析原因（50字内）
发展匹配度: X/10 - 具体分析原因（50字内）
地理匹配度: X/10 - 具体分析原因（50字内）
薪资匹配度: X/10 - 具体分析原因（50字内）
综合匹配度: X/10
推荐理由: 具体推荐这个职位的原因和建议
```

**重要说明：**
1. 必须严格按照上述格式返回，不要加粗体标记（**）
2. 推荐理由必须是具体的文字内容，不要使用模板文本
3. 每个评分后面的分析原因控制在50字以内
4. 推荐理由要简洁有力，说明为什么推荐该职位
"""
    
    return prompt


def parse_llm_analysis(analysis_text: str) -> Dict[str, Any]:
    """
    Turn the LLM's free-text match analysis into a structured dict.

    Args:
        analysis_text: Raw LLM reply.

    Returns:
        Dict: Scores per dimension (0-10, default 5), a recommendation
        reason, the raw text under 'detailed_analysis', and a
        'confidence' value reflecting how many scores were parsed.
    """
    # Defaults used whenever a field cannot be extracted.
    analysis_result = {
        'skill_match': 5,
        'experience_match': 5,
        'development_match': 5,
        'location_match': 5,
        'salary_match': 5,
        'overall_match': 5,
        'recommendation_reason': '匹配度分析',
        'detailed_analysis': analysis_text,
        'confidence': 0.7
    }

    try:
        # Several pattern variants per dimension: plain "X/10" lines,
        # markdown-bold (**) lines, and prose such as "...为X分".
        patterns = {
            'skill_match': [
                r'技能匹配度[：:]\s*(\d+)', 
                r'技能.*?(\d+)/10',
                r'\*\*技能匹配度[：:]\s*(\d+)/10',
                r'技能匹配度为(\d+)分'
            ],
            'experience_match': [
                r'经验匹配度[：:]\s*(\d+)', 
                r'经验.*?(\d+)/10',
                r'\*\*经验匹配度[：:]\s*(\d+)/10',
                r'经验匹配度为(\d+)分'
            ],
            'development_match': [
                r'发展匹配度[：:]\s*(\d+)', 
                r'发展.*?(\d+)/10',
                r'\*\*发展匹配度[：:]\s*(\d+)/10',
                r'发展匹配度为(\d+)分'
            ],
            'location_match': [
                r'地理匹配度[：:]\s*(\d+)', 
                r'地理.*?(\d+)/10',
                r'\*\*地理匹配度[：:]\s*(\d+)/10',
                r'地理匹配度为(\d+)分'
            ],
            'salary_match': [
                r'薪资匹配度[：:]\s*(\d+)', 
                r'薪资.*?(\d+)/10',
                r'\*\*薪资匹配度[：:]\s*(\d+)/10',
                r'薪资匹配度为(\d+)分'
            ],
            'overall_match': [
                r'综合匹配度[：:]\s*(\d+)', 
                r'综合.*?(\d+)/10',
                r'\*\*综合匹配度[：:]\s*(\d+)/10',
                r'综合匹配度为(\d+)分'
            ]
        }

        # First pattern that yields a score inside [0, 10] wins.
        for key, pattern_list in patterns.items():
            for pattern in pattern_list:
                hit = re.search(pattern, analysis_text)
                if hit:
                    value = int(hit.group(1))
                    if 0 <= value <= 10:
                        analysis_result[key] = value
                        break

        # Recommendation text: try a sequence of phrasings, from the
        # canonical "推荐理由:" label down to looser sentence shapes.
        reason_patterns = [
            r'推荐理由[：:]\s*(.+?)(?:\n|$)',
            r'\*\*推荐理由[：:]\*\*\s*(.+?)(?:\n|$)',
            r'建议[：:]\s*(.+?)(?:\n|$)',
            r'总结[：:]\s*(.+?)(?:\n|$)',
            r'推荐意见[：:]\s*(.+?)(?:\n|$)',
            r'因此.*?推荐(.+?)(?:\n|$)',
            r'综合评价[：:]\s*(.+?)(?:\n|$)',
            r'推荐该职位.*?因为(.+?)(?:\n|$)',
            r'建议该候选人(.+?)(?:\n|$)'
        ]

        for pattern in reason_patterns:
            found = re.search(pattern, analysis_text, re.DOTALL)
            if not found:
                continue
            candidate = found.group(1).strip()
            # Collapse stray markdown asterisks and whitespace runs.
            candidate = re.sub(r'[*\s]+', ' ', candidate)
            candidate = candidate.strip('*')
            if len(candidate) > 10:  # only keep a meaningful reason
                analysis_result['recommendation_reason'] = candidate
                break

        # Fallback: synthesize a reason from the scores when none parsed.
        if analysis_result['recommendation_reason'] == '匹配度分析':
            skill_score = analysis_result.get('skill_match', 5)
            exp_score = analysis_result.get('experience_match', 5)
            overall_score = analysis_result.get('overall_match', 5)

            if overall_score >= 8:
                analysis_result['recommendation_reason'] = f"高度推荐！候选人技能匹配度{skill_score}/10，经验匹配度{exp_score}/10，各方面都很符合职位要求。"
            elif overall_score >= 6:
                analysis_result['recommendation_reason'] = f"较为推荐。候选人在技能({skill_score}/10)和经验({exp_score}/10)方面表现良好，有一定的匹配度。"
            else:
                analysis_result['recommendation_reason'] = f"匹配度一般。候选人技能匹配度{skill_score}/10，经验匹配度{exp_score}/10，建议进一步评估。"

        # Confidence grows with the number of scores that differ from the
        # default (a genuinely-parsed 5 is indistinguishable here).
        parsed_scores = [v for k, v in analysis_result.items() if k.endswith('_match') and v != 5]
        analysis_result['confidence'] = min(0.9, 0.5 + len(parsed_scores) * 0.1)

    except Exception as e:
        print(f"⚠️ LLM结果解析部分失败: {e}")

    return analysis_result


def create_fallback_analysis(resume_info: Dict[str, Any], job_data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Rule-based match analysis used when the LLM is unavailable.

    Args:
        resume_info: Extracted resume facts.
        job_data: Job posting data.

    Returns:
        Dict: Same shape as parse_llm_analysis() output, with rule-based
        skill/experience/location scores and neutral defaults elsewhere.
    """
    skill_score = calculate_skill_match_score(resume_info['skills']['all_skills'], job_data.get('description', ''))
    experience_score = calculate_experience_match_score(resume_info['experience_years'], job_data)
    location_score = calculate_location_match_score(resume_info['preferred_location'], job_data.get('city_name', ''))

    # Overall score is the plain (rounded) average of the three rules.
    overall_score = round((skill_score + experience_score + location_score) / 3)

    return {
        'skill_match': skill_score,
        'experience_match': experience_score,
        'development_match': 6,  # neutral default
        'location_match': location_score,
        'salary_match': 6,  # neutral default
        'overall_match': overall_score,
        'recommendation_reason': f"基于规则分析：技能匹配{skill_score}/10，经验匹配{experience_score}/10，地点匹配{location_score}/10",
        'detailed_analysis': "使用规则引擎进行的基础匹配分析",
        'confidence': 0.6
    }


def calculate_skill_match_score(resume_skills: List[str], job_description: str) -> int:
    """
    Rule-based skill match score (0-10).

    Args:
        resume_skills: Skill names extracted from the resume.
        job_description: Job description text to search in.

    Returns:
        int: Banded score based on the fraction of resume skills that
        appear (case-insensitively) in the description; 0 when either
        input is empty.
    """
    if not resume_skills or not job_description:
        return 0

    job_desc_lower = job_description.lower()

    # Substring containment is deliberate ("java" matches "javascript").
    # NOTE(review): confirm whole-word matching is not required here.
    matched_skills = sum(1 for skill in resume_skills if skill.lower() in job_desc_lower)

    # The empty-list case was already handled by the guard above, so the
    # division is safe.
    match_ratio = matched_skills / len(resume_skills)

    # Map the ratio onto the 1-9 scoring bands.
    if match_ratio >= 0.8:
        return 9
    elif match_ratio >= 0.6:
        return 7
    elif match_ratio >= 0.4:
        return 5
    elif match_ratio >= 0.2:
        return 3
    else:
        return 1


def calculate_experience_match_score(resume_experience: int, job_data: Dict[str, Any]) -> int:
    """
    Rule-based experience match score (0-10).

    Args:
        resume_experience: Candidate's years of experience.
        job_data: Job posting data; only 'title' influences the result
            (the previously-read description was never used — removed).

    Returns:
        int: 10 for an exact match with the inferred required years,
        stepping down by 2 per extra year of difference (minimum 2).
    """
    job_title = job_data.get('title', '').lower()

    # Infer required years from seniority keywords in the title.
    # NOTE(review): '工程师' is tested before '高级', so a title like
    # '高级工程师' lands in the 2-year bucket — confirm this is intended.
    if any(word in job_title for word in ['初级', '入门', '实习', '应届']):
        required_exp = 0
    elif any(word in job_title for word in ['中级', '工程师']):
        required_exp = 2
    elif any(word in job_title for word in ['高级', '资深', '专家']):
        required_exp = 5
    elif any(word in job_title for word in ['架构师', '技术负责人', '总监']):
        required_exp = 8
    else:
        required_exp = 2  # default to mid-level expectations

    # Score by absolute distance from the inferred requirement.
    exp_diff = abs(resume_experience - required_exp)

    if exp_diff == 0:
        return 10
    elif exp_diff <= 1:
        return 8
    elif exp_diff <= 2:
        return 6
    elif exp_diff <= 3:
        return 4
    else:
        return 2


def calculate_location_match_score(resume_location: Optional[str], job_location: str) -> int:
    """
    Rule-based location match score (0-10).

    Args:
        resume_location: Candidate's preferred city, or None/empty when
            unspecified.
        job_location: City of the job posting.

    Returns:
        int: 10 for an exact match, 2 for a mismatch, 6 when the
        candidate has no preference, 5 when the job city is unknown.
    """
    # No stated preference: neutral-positive score.
    if not resume_location:
        return 6
    # Job city missing: slightly below neutral.
    if not job_location:
        return 5
    return 10 if resume_location == job_location else 2


# ===================================
# 第七部分：推荐理由生成模块
# ===================================

def generate_recommendation_reasons(job: Dict[str, Any], 
                                   resume_info: Dict[str, Any], 
                                   llm_analysis: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
    """
    Produce up to three personalized reasons for recommending a job.

    Args:
        job: Job posting data.
        resume_info: Extracted resume facts.
        llm_analysis: Optional structured LLM analysis result.

    Returns:
        List[Dict]: The top reasons (at most 3) after priority ranking,
        drawn from skill, experience, company/location, LLM and system
        sources in that order.
    """
    candidate_reasons = []

    # Collect candidates from every reason generator.
    candidate_reasons += generate_skill_match_reasons(job, resume_info)
    candidate_reasons += generate_experience_match_reasons(job, resume_info)
    candidate_reasons += generate_company_location_reasons(job, resume_info)
    if llm_analysis:
        candidate_reasons += generate_llm_based_reasons(llm_analysis)
    candidate_reasons += generate_system_reasons(job)

    # Rank and keep only the best three.
    return prioritize_reasons(candidate_reasons)[:3]


def generate_skill_match_reasons(job: Dict[str, Any], resume_info: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Build recommendation reasons based on skill overlap.

    Args:
        job: Job posting data; title/description/requirements are searched.
        resume_info: Extracted resume facts with skills['all_skills'].

    Returns:
        List[Dict]: Zero or one reason dict, tiered by how many resume
        skills appear in the combined job text.
    """
    resume_skills = resume_info['skills']['all_skills']
    haystack = f"{job.get('title', '')} {job.get('description', '')} {job.get('requirements', '')}".lower()

    # Case-insensitive substring containment against the job text.
    matched_skills = [skill for skill in resume_skills if skill.lower() in haystack]
    if not matched_skills:
        return []

    match_count = len(matched_skills)
    match_ratio = match_count / len(resume_skills) if resume_skills else 0

    if match_count >= 5 or match_ratio >= 0.6:
        # Strong overlap.
        reason = {
            'type': 'skill_high_match',
            'priority': 10,
            'content': f"🎯 您掌握的{', '.join(matched_skills[:4])}等{match_count}项技能与职位高度匹配",
            'confidence': 0.9,
        }
    elif match_count >= 2:
        # Moderate overlap.
        reason = {
            'type': 'skill_match',
            'priority': 8,
            'content': f"💻 您的{', '.join(matched_skills[:3])}技能符合职位要求",
            'confidence': 0.7,
        }
    else:
        # A single shared skill.
        reason = {
            'type': 'skill_basic_match',
            'priority': 6,
            'content': f"🔧 您的{matched_skills[0]}技能与职位相关",
            'confidence': 0.5,
        }

    reason['matched_skills'] = matched_skills
    reason['match_ratio'] = match_ratio
    return [reason]


def generate_experience_match_reasons(job: Dict[str, Any], resume_info: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Build recommendation reasons based on years of experience.

    Args:
        job: Job posting data; the title drives the seniority level, and
            the description is checked for graduate-friendly keywords.
        resume_info: Extracted resume facts with 'experience_years'.

    Returns:
        List[Dict]: Zero or one reason dict matching the candidate's
        experience band against the inferred position level.

    Note: the old per-level ``required_exp`` assignments were dead
    stores (never read) and have been removed.
    """
    experience_years = resume_info['experience_years']
    job_title = job.get('title', '').lower()
    job_description = job.get('description', '').lower()

    # Infer the position level from seniority keywords in the title.
    if any(word in job_title for word in ['初级', '入门', '实习', '应届', 'junior']):
        position_level = 'junior'
    elif any(word in job_title for word in ['高级', '资深', '专家', 'senior', '架构师']):
        position_level = 'senior'
    else:
        position_level = 'mid'

    reasons = []

    if experience_years == 0:
        # Fresh graduate: only recommend explicitly graduate-friendly roles.
        if position_level == 'junior' or '应届' in job_description or '培训' in job_description:
            reasons.append({
                'type': 'fresh_graduate_match',
                'priority': 8,
                'content': "🎓 此职位欢迎应届毕业生，是您职业生涯的优秀起点",
                'confidence': 0.9
            })
    elif 1 <= experience_years <= 3:
        # Early-career candidates fit junior and mid-level roles.
        if position_level in ['junior', 'mid']:
            reasons.append({
                'type': 'experience_match',
                'priority': 8,
                'content': f"📈 您的{experience_years}年经验与{position_level}级职位要求匹配",
                'confidence': 0.8
            })
    elif experience_years >= 5:
        # Seasoned candidates: ideal for senior roles, a plus for mid.
        if position_level == 'senior':
            reasons.append({
                'type': 'senior_match',
                'priority': 9,
                'content': f"🚀 您的{experience_years}年丰富经验完全符合高级职位要求",
                'confidence': 0.9
            })
        elif position_level == 'mid':
            reasons.append({
                'type': 'overqualified_good',
                'priority': 7,
                'content': f"💪 您的{experience_years}年经验超出职位要求，可快速胜任并有发展空间",
                'confidence': 0.7
            })

    return reasons


def generate_company_location_reasons(job: Dict[str, Any], resume_info: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Build company-tier and location-based recommendation reasons.

    Compares the job's city against the candidate's preferred location and
    checks the employer against a small table of well-known company tiers.
    """
    collected: List[Dict[str, Any]] = []

    wanted_city = resume_info['preferred_location']
    actual_city = job.get('city_name', '')

    # An exact preference match beats the generic city-perk fallback.
    if wanted_city and actual_city == wanted_city:
        collected.append({
            'type': 'location_perfect_match',
            'priority': 8,
            'content': f"📍 工作地点{actual_city}完全符合您的期望",
            'confidence': 1.0
        })
    elif actual_city:
        perks = {
            '北京': '首都机会多，科技氛围浓厚',
            '上海': '国际化程度高，发展机会多',
            '深圳': '创新之城，互联网产业发达',
            '杭州': '电商中心，生活环境优美',
            '成都': '新一线城市，生活成本适中'
        }
        perk = perks.get(actual_city)
        if perk is not None:
            collected.append({
                'type': 'location_advantage',
                'priority': 5,
                'content': f"🌟 {actual_city}{perk}",
                'confidence': 0.6
            })

    # Well-known employer check: first matching tier wins.
    employer = job.get('company_name', '')
    tier_table = [
        ('一线大厂', ['阿里巴巴', '腾讯', '百度', '字节跳动', '美团', '京东']),
        ('知名企业', ['网易', '小米', '华为', '滴滴', '快手', '拼多多', '蚂蚁金服']),
        ('独角兽', ['小红书', 'OPPO', 'vivo', '比亚迪', '理想汽车']),
    ]
    for tier_label, members in tier_table:
        if any(member in employer for member in members):
            collected.append({
                'type': 'famous_company',
                'priority': 7,
                'content': f"🏢 {employer}是{tier_label}，平台优秀，发展前景广阔",
                'confidence': 0.8,
                'company_tier': tier_label
            })
            break

    return collected


def generate_llm_based_reasons(llm_analysis: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Convert the LLM's match score and free-text reason into reason entries."""
    produced: List[Dict[str, Any]] = []

    score = llm_analysis.get('overall_match', 0)
    reason_text = llm_analysis.get('recommendation_reason', '')

    # Only scores of 6+ are surfaced; the higher band gets stronger wording.
    if score >= 8:
        produced.append({
            'type': 'llm_high_match',
            'priority': 9,
            'content': f"🤖 AI深度分析显示高度匹配({score}/10分)",
            'confidence': 0.9,
            'llm_reason': reason_text,
            'match_score': score
        })
    elif score >= 6:
        produced.append({
            'type': 'llm_good_match',
            'priority': 6,
            'content': f"🔍 智能分析显示良好匹配({score}/10分)",
            'confidence': 0.7,
            'llm_reason': reason_text,
            'match_score': score
        })

    # A non-trivial free-text reason becomes its own entry, capped at 80 chars.
    if reason_text and len(reason_text) > 10:
        snippet = reason_text[:80]
        suffix = '...' if len(reason_text) > 80 else ''
        produced.append({
            'type': 'llm_detailed_reason',
            'priority': 7,
            'content': f"💡 专业分析：{snippet}{suffix}",
            'confidence': 0.8,
            'full_reason': reason_text
        })

    return produced


def generate_system_reasons(job: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Build reasons from retrieval-system signals (dual hit, score, salary)."""
    system_reasons: List[Dict[str, Any]] = []

    # Job was found by both ES (keyword) and Milvus (semantic) channels.
    if job.get('is_complementary', False):
        system_reasons.append({
            'type': 'complementary_match',
            'priority': 8,
            'content': "⭐ 关键词和语义双重匹配，系统强烈推荐",
            'confidence': 0.85
        })

    # Fused ranking score, split into two bands.
    score = job.get('final_score', 0)
    if score > 0.8:
        system_reasons.append({
            'type': 'high_score',
            'priority': 7,
            'content': f"🎯 综合评分{score:.2f}，推荐度很高",
            'confidence': 0.8,
            'score': score
        })
    elif score > 0.6:
        system_reasons.append({
            'type': 'good_score',
            'priority': 5,
            'content': f"👍 综合评分{score:.2f}，值得考虑",
            'confidence': 0.6,
            'score': score
        })

    # Crude attractiveness check on the raw salary string.
    salary_text = job.get('money', '')
    if salary_text and any(tag in salary_text for tag in ('30K', '25K', '40K')):
        system_reasons.append({
            'type': 'attractive_salary',
            'priority': 6,
            'content': f"💰 薪资待遇{salary_text}，具有竞争力",
            'confidence': 0.7
        })

    return system_reasons


def prioritize_reasons(reasons: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Sort recommendation reasons and drop duplicate types.

    Ordering rules:
    1. Higher 'priority' first.
    2. Ties broken by higher 'confidence'.
    3. Only the first reason of each 'type' is kept.
    """
    # Negated keys give descending order; sorted() is stable, so fully tied
    # entries keep their original relative order.
    ordered = sorted(reasons, key=lambda item: (-item['priority'], -item['confidence']))

    # Dict preserves insertion order, so the best entry per type survives.
    best_per_type: Dict[str, Dict[str, Any]] = {}
    for item in ordered:
        best_per_type.setdefault(item['type'], item)

    return list(best_per_type.values())


# ===================================
# 第八部分：主函数和完整流程
# ===================================

def process_resume_and_recommend_jobs(resume_file_path: str, top_n: int = 20) -> Dict[str, Any]:
    """
    Full resume-processing and job-recommendation pipeline.

    Args:
        resume_file_path: Path to the resume file (PDF/Word/TXT).
        top_n: Number of recommended jobs to return.

    Returns:
        Dict: on success {'status': 'success', 'resume_analysis': ...,
        'search_results': ..., 'recommendations': [...], 'summary': ...};
        on failure {'status': 'error', 'error_message': ...,
        'recommendations': []}.

    Pipeline:
    1. Parse the resume file
    2. Extract key information
    3. Generate the embedding vector
    4. Hybrid retrieval (ES keyword + Milvus semantic)
    5. Merge and rank the results
    6. LLM deep-dive analysis
    7. Build recommendation reasons
    8. Return the final payload
    """

    print("=" * 50)
    print("🚀 开始基于简历的混合检索推荐系统")
    print("=" * 50)

    try:
        # Step 1: parse the resume file into raw text
        print("\n📄 Step 1: 解析简历文件")
        resume_text = parse_resume_content(resume_file_path)
        if not resume_text:
            # Bug fix: this used to return {"error": ...} without a 'status'
            # key, which made display_recommendations() raise KeyError.
            # The legacy 'error' key is kept for backward compatibility.
            return {
                'status': 'error',
                'error': "简历解析失败，请检查文件格式",
                'error_message': "简历解析失败，请检查文件格式",
                'recommendations': []
            }

        print(f"✅ 简历解析成功，文本长度: {len(resume_text)} 字符")

        # Step 2: extract structured key information (skills, experience, ...)
        print("\n🔍 Step 2: 提取简历关键信息")
        resume_info = extract_key_info_from_resume(resume_text)

        # Step 3: embed the resume for semantic retrieval
        print("\n🧠 Step 3: 生成简历向量表示")
        resume_embedding = generate_resume_embeddings(resume_info)
        if not resume_embedding:
            print("⚠️ 向量生成失败，将跳过语义检索")

        # Step 4: hybrid retrieval
        print("\n🔍 Step 4: 执行混合检索")

        # ES keyword search
        es_results = search_jobs_by_keywords(resume_info, top_k=50)

        # Milvus semantic search (only if the embedding was produced)
        milvus_results = []
        if resume_embedding:
            milvus_results = search_jobs_by_vector(resume_embedding, top_k=50)

        # Report partial/total retrieval failures but keep going.
        if not es_results and not milvus_results:
            print("⚠️ ES和Milvus都无结果，请检查数据索引是否正确建立")
        elif not es_results:
            print("⚠️ ES检索无结果，仅使用Milvus语义检索结果")
        elif not milvus_results:
            print("⚠️ Milvus检索无结果，仅使用ES关键词检索结果")

        # Enrich both result sets with fields from the Django database.
        print("\n🔄 Step 4.1: 丰富搜索结果")
        es_results = enrich_search_results_with_django_data(es_results)
        milvus_results = enrich_search_results_with_django_data(milvus_results)

        # Step 5: merge, dedupe and rank the two result streams
        print("\n🔄 Step 5: 结果融合和排序")
        merged_results = merge_and_rank_results(es_results, milvus_results)

        # Only the top-N candidates go through the expensive LLM analysis.
        top_jobs = merged_results[:top_n]

        # Step 6: LLM match analysis + reason generation per candidate job
        print(f"\n🤖 Step 6: LLM分析前{len(top_jobs)}个候选职位")
        final_recommendations = []

        for i, job in enumerate(top_jobs):
            print(f"   分析职位 {i+1}/{len(top_jobs)}: {job.get('title', '')} @ {job.get('company_name', '')}")

            # LLM deep match analysis
            llm_analysis = analyze_resume_job_match_with_llm(resume_info, job)

            # Build human-readable recommendation reasons
            reasons = generate_recommendation_reasons(job, resume_info, llm_analysis)

            # Assemble the per-job recommendation record
            recommendation = {
                'job_info': job,
                'llm_analysis': llm_analysis,
                'recommendation_reasons': reasons,
                'ranking': i + 1
            }

            final_recommendations.append(recommendation)

        # Step 7: aggregate summary statistics
        print("\n📊 Step 7: 生成推荐摘要")
        summary = generate_recommendation_summary(resume_info, final_recommendations)

        # Assemble the complete result payload
        result = {
            'status': 'success',
            'resume_analysis': {
                'key_info': resume_info,
                'text_length': len(resume_text),
                'skills_count': len(resume_info['skills']['all_skills']),
                'experience_years': resume_info['experience_years']
            },
            'search_results': {
                'es_count': len(es_results),
                'milvus_count': len(milvus_results), 
                'merged_count': len(merged_results),
                'final_count': len(final_recommendations)
            },
            'recommendations': final_recommendations,
            'summary': summary
        }

        print("\n✅ 推荐系统处理完成！")
        print(f"📈 成功推荐 {len(final_recommendations)} 个匹配职位")

        return result

    except Exception as e:
        # Top-level boundary: report the failure in the standard error shape.
        print(f"\n❌ 系统处理失败: {e}")
        return {
            'status': 'error',
            'error_message': str(e),
            'recommendations': []
        }


def generate_recommendation_summary(resume_info: Dict[str, Any],
                                  recommendations: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Build aggregate statistics over the final recommendation list."""

    # The candidate profile is identical in both branches, so build it once.
    profile = {
        'skills_count': len(resume_info['skills']['all_skills']),
        'experience_years': resume_info['experience_years'],
        'education': resume_info['education'],
        'preferred_location': resume_info['preferred_location']
    }

    if not recommendations:
        return {
            'message': '未找到匹配的职位',
            'total_recommendations': 0,
            'average_match_score': 0,
            'top_companies': {},
            'city_distribution': {},
            'salary_ranges': [],
            'best_match': {
                'title': '',
                'company': '',
                'match_score': 0,
                'top_reason': ''
            },
            'candidate_profile': profile
        }

    total_jobs = len(recommendations)
    avg_score = sum(rec['llm_analysis']['overall_match'] for rec in recommendations) / total_jobs

    # Tally company/city frequencies and collect raw salary strings.
    company_counts: Dict[str, int] = {}
    city_counts: Dict[str, int] = {}
    salary_list: List[str] = []

    for rec in recommendations:
        job = rec['job_info']
        employer = job.get('company_name', '未知')
        town = job.get('city_name', '未知')
        pay = job.get('money', '')

        company_counts[employer] = company_counts.get(employer, 0) + 1
        city_counts[town] = city_counts.get(town, 0) + 1
        if pay:
            salary_list.append(pay)

    def _top_five(counter: Dict[str, int]) -> Dict[str, int]:
        # Highest counts first; ties keep insertion order (sorted is stable).
        ranked = sorted(counter.items(), key=lambda kv: kv[1], reverse=True)
        return dict(ranked[:5])

    # The merged list is already ranked, so the first entry is the best match.
    top_pick = recommendations[0]
    top_reasons = top_pick['recommendation_reasons']

    return {
        'total_recommendations': total_jobs,
        'average_match_score': round(avg_score, 1),
        'top_companies': _top_five(company_counts),
        'city_distribution': _top_five(city_counts),
        'salary_ranges': salary_list[:10],
        'best_match': {
            'title': top_pick['job_info'].get('title', ''),
            'company': top_pick['job_info'].get('company_name', ''),
            'match_score': top_pick['llm_analysis']['overall_match'],
            'top_reason': top_reasons[0]['content'] if top_reasons else ''
        },
        'candidate_profile': profile
    }


def display_recommendations(result: Dict[str, Any]):
    """
    Pretty-print the result of process_resume_and_recommend_jobs.

    Args:
        result: Return value of process_resume_and_recommend_jobs.
    """

    if result['status'] != 'success':
        print(f"❌ 推荐失败: {result.get('error_message', '未知错误')}")
        return

    analysis_part = result['resume_analysis']
    rec_list = result['recommendations']
    overview = result['summary']

    def _banner(title: str):
        # Shared section header: blank line + ruler + title + ruler.
        print("\n" + "=" * 60)
        print(title)
        print("=" * 60)

    _banner("📋 简历分析结果")
    key_info = analysis_part['key_info']
    print(f"📊 技能数量: {analysis_part['skills_count']} 项")
    print(f"💼 工作经验: {analysis_part['experience_years']} 年")
    print(f"🎓 教育背景: {key_info['education']}")
    print(f"📍 期望地点: {key_info['preferred_location'] or '未指定'}")
    print(f"💰 期望薪资: {key_info['expected_salary'] or '未指定'}")

    _banner("🎯 推荐结果概览")
    print(f"📈 推荐职位总数: {overview['total_recommendations']} 个")
    print(f"⭐ 平均匹配度: {overview['average_match_score']}/10 分")
    print(f"🏢 涉及公司: {len(overview['top_companies'])} 家")
    print(f"🌍 覆盖城市: {len(overview['city_distribution'])} 个")

    _banner("🥇 最佳推荐职位")
    best = overview['best_match']
    print(f"📋 职位: {best['title']}")
    print(f"🏢 公司: {best['company']}")
    print(f"⭐ 匹配度: {best['match_score']}/10 分")
    print(f"💡 推荐理由: {best['top_reason']}")

    _banner("📊 详细推荐列表")
    for rank, rec in enumerate(rec_list[:10], start=1):  # top 10 only
        job = rec['job_info']
        llm_part = rec['llm_analysis']
        why = rec['recommendation_reasons']

        print(f"\n【{rank}】{job.get('title', '')} @ {job.get('company_name', '')}")
        print(f"   📍 地点: {job.get('city_name', '')}   💰 薪资: {job.get('money', '')}")
        print(f"   ⭐ 匹配度: {llm_part['overall_match']}/10   🔥 综合得分: {job.get('final_score', 0):.3f}")
        print(f"   💡 推荐理由:")
        for idx, reason in enumerate(why[:2], start=1):  # top 2 reasons
            print(f"      {idx}. {reason['content']}")
        print(f"   📝 职位描述: {job.get('description', '')[:100]}...")


# ===================================
# 示例用法和测试代码
# ===================================

def create_sample_resume_file():
    """Write a fixed sample resume to 'sample_resume.txt' for testing.

    Returns:
        str: The path of the file that was written.
    """

    # NOTE: this literal is runtime data written to disk; keep it verbatim.
    resume_body = """
张三的个人简历

基本信息：
姓名：张三
年龄：26岁
学历：本科
专业：计算机科学与技术
期望地点：北京
期望薪资：20-25K

工作经验：
2020年-至今  阿里巴巴  Java开发工程师  3年
- 负责电商平台后端系统开发
- 使用Java、Spring Boot、MySQL、Redis等技术
- 参与微服务架构设计和优化

技能清单：
编程语言：Java、Python、JavaScript
框架技术：Spring、SpringBoot、Vue.js
数据库：MySQL、Redis、MongoDB
工具平台：Git、Docker、Maven
云平台：阿里云

项目经历：
1. 电商推荐系统 (2022-2023)
   - 使用Java开发推荐算法服务
   - 集成机器学习模型，提升推荐准确率30%
   - 技术栈：Spring Boot、MySQL、Redis、Kafka

2. 用户管理系统 (2021-2022)
   - 开发用户注册、登录、权限管理功能
   - 使用Vue.js构建前端界面
   - 技术栈：Java、Spring、Vue.js、MySQL

教育背景：
2016-2020  北京理工大学  计算机科学与技术  本科
"""

    # Persist as a UTF-8 text file in the working directory.
    target_path = "sample_resume.txt"
    with open(target_path, 'w', encoding='utf-8') as handle:
        handle.write(resume_body)

    print(f"📄 示例简历文件已创建: {target_path}")
    return target_path


def main():
    """System entry point: interactive driver for the recommendation flow.

    Asks whether to use the bundled sample resume, validates the chosen
    file path, runs the full pipeline, prints the result, and persists
    the complete payload as JSON.
    """

    print("🎯 基于简历的混合检索推荐系统")
    print("👨‍💻 支持PDF、Word、TXT格式简历")
    print("🚀 集成ES关键词检索 + Milvus语义检索 + LLM智能分析")
    print()

    # Offer the bundled sample resume for a quick smoke test.
    print("🔧 是否使用示例简历进行测试？(y/n): ", end="")
    use_sample = input().lower().strip()

    if use_sample == 'y':
        resume_file_path = create_sample_resume_file()
    else:
        resume_file_path = input("📂 请输入简历文件路径: ").strip()

    # Validate the path before starting the expensive pipeline.
    # (Fix: removed a redundant local 'import os' — the module already
    # imports os at the top of the file.)
    if not os.path.exists(resume_file_path):
        print(f"❌ 文件不存在: {resume_file_path}")
        return

    # Run the full recommendation pipeline.
    result = process_resume_and_recommend_jobs(resume_file_path, top_n=15)

    # Show the results on the console.
    display_recommendations(result)

    # Persist the full payload for later inspection.
    output_file = "recommendation_result.json"
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(result, f, ensure_ascii=False, indent=2)

    print(f"\n💾 完整结果已保存到: {output_file}")


# Script entry point (guarded so importing this module does not start the
# interactive flow).
if __name__ == "__main__":
    main()
