#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
改进的职位去重逻辑

问题分析：
当前系统仅基于job_id进行去重，但ES和Milvus中同一职位可能有不同的ID：
- ES: job_id=3 (JAVA工程师, 融金汇银, 上海, 18000)  
- Milvus: job_id=1_1, 1_0 (同样的JAVA工程师, 融金汇银, 上海, 18000)

解决方案：
1. 基于职位内容特征进行去重
2. 使用模糊匹配算法识别相似职位
3. 保留最高分数的版本
"""

import hashlib
from typing import List, Dict, Any, Tuple
from difflib import SequenceMatcher


def generate_job_signature(job: Dict[str, Any]) -> str:
    """Build a content-based hash signature used to detect identical jobs.

    The signature is derived from title, company, city and salary so that
    the same posting coming from different backends (ES vs. Milvus) under
    different job_ids maps to the same key.

    Args:
        job: Job record; the relevant fields may be missing, ``None``, or
            non-string values (e.g. a numeric salary).

    Returns:
        str: 32-char hex MD5 digest of the normalized key fields.
    """
    def _norm(field: str, lower: bool = True) -> str:
        # Coerce to str first: salary may arrive as int/float, and a field
        # present with value None would otherwise crash on .strip().
        value = job.get(field)
        text = '' if value is None else str(value).strip()
        return text.lower() if lower else text

    # Key fields used to fingerprint the posting.
    key_fields = [
        _norm('title'),
        _norm('company_name'),
        _norm('city_name'),
        _norm('money', lower=False),  # salary compared verbatim, case kept
    ]

    # Join then hash (MD5 is fine here: fingerprinting, not security).
    content = '|'.join(key_fields)
    signature = hashlib.md5(content.encode('utf-8')).hexdigest()

    return signature


def calculate_job_similarity(job1: Dict[str, Any], job2: Dict[str, Any]) -> float:
    """Return a weighted similarity score between two job records.

    Fields and weights: title 40%, company 30%, city 20%, salary 10%.
    Text fields are compared with difflib's SequenceMatcher after
    strip/lower normalization; salary is an all-or-nothing exact match
    on the string form.

    Args:
        job1, job2: Job record dicts.

    Returns:
        float: Weighted similarity in [0, 1].
    """
    # (field name, weight) pairs for the fuzzy-matched text fields,
    # in descending order of importance.
    text_fields = (
        ('title', 0.4),
        ('company_name', 0.3),
        ('city_name', 0.2),
    )

    score = 0.0
    for field, weight in text_fields:
        left = job1.get(field, '').strip().lower()
        right = job2.get(field, '').strip().lower()
        score += SequenceMatcher(None, left, right).ratio() * weight

    # Salary (weight 10%): exact string equality only.
    if str(job1.get('money', '')).strip() == str(job2.get('money', '')).strip():
        score += 0.1

    return score


def _merge_similar_pair(keep: Dict[str, Any], drop: Dict[str, Any]) -> Dict[str, Any]:
    """Fold the provenance of a discarded near-duplicate into the kept job.

    Mirrors merge_duplicate_jobs() for the fuzzy-match path: the winner's
    content fields are kept, while per-backend scores, has_es/has_vector
    flags and the sources list are merged so origin information survives.
    """
    merged = keep.copy()
    merged['es_score'] = max(keep.get('es_score', 0), drop.get('es_score', 0))
    merged['vector_score'] = max(keep.get('vector_score', 0), drop.get('vector_score', 0))
    merged['has_es'] = keep.get('has_es', False) or drop.get('has_es', False)
    merged['has_vector'] = keep.get('has_vector', False) or drop.get('has_vector', False)
    # sorted() makes the merged sources list deterministic.
    merged['sources'] = sorted(set(keep.get('sources', []) + drop.get('sources', [])))
    return merged


def _tag_results(results: List[Dict[str, Any]], source: str) -> List[Dict[str, Any]]:
    """Copy each raw hit and attach signature + provenance bookkeeping.

    `source` is 'elasticsearch' or 'milvus'; the matching backend's score
    is carried over and the other backend's score is zeroed.
    """
    is_es = source == 'elasticsearch'
    tagged = []
    for job in results:
        job_data = job.copy()
        job_data['signature'] = generate_job_signature(job)
        job_data['source'] = source
        job_data['has_es'] = is_es
        job_data['has_vector'] = not is_es
        job_data['es_score'] = job.get('es_score', 0) if is_es else 0
        job_data['vector_score'] = 0 if is_es else job.get('vector_score', 0)
        job_data['sources'] = [source]
        tagged.append(job_data)
    return tagged


def advanced_job_deduplication(es_results: List[Dict[str, Any]],
                             milvus_results: List[Dict[str, Any]],
                             similarity_threshold: float = 0.85) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
    """Deduplicate job postings across ES and Milvus result sets.

    Two passes:
      1. Exact dedup on a content signature (title/company/city/salary),
         merging provenance via merge_duplicate_jobs().
      2. Fuzzy dedup via calculate_job_similarity(): of each similar pair
         the higher-scored job is kept and the loser's provenance is
         merged into it (previously the loser's sources/scores were
         silently dropped, unlike the exact-match path).

    Args:
        es_results: Elasticsearch hits.
        milvus_results: Milvus hits.
        similarity_threshold: Fuzzy-dedup cutoff in [0, 1] (default 0.85).

    Returns:
        Tuple of (deduplicated job list, statistics dict).
    """
    print(f"🔄 开始高级去重: ES={len(es_results)}个, Milvus={len(milvus_results)}个")
    print(f"📊 相似度阈值: {similarity_threshold}")

    # Running statistics returned to the caller.
    stats = {
        'original_es_count': len(es_results),
        'original_milvus_count': len(milvus_results),
        'duplicates_found': 0,
        'exact_matches': 0,
        'similar_matches': 0,
        'final_count': 0
    }

    # Step 1: normalize both result sets into one tagged list.
    all_jobs = (_tag_results(es_results, 'elasticsearch')
                + _tag_results(milvus_results, 'milvus'))

    # Step 2: group by exact content signature.
    signature_groups: Dict[str, List[Dict[str, Any]]] = {}
    for job in all_jobs:
        signature_groups.setdefault(job['signature'], []).append(job)

    exact_duplicates = sum(1 for group in signature_groups.values() if len(group) > 1)
    stats['exact_matches'] = exact_duplicates
    print(f"✅ 精确签名匹配: 发现{exact_duplicates}组重复")

    # Step 3: collapse each signature group into a single merged record.
    merged_jobs = []
    for job_group in signature_groups.values():
        if len(job_group) == 1:
            merged_jobs.append(job_group[0])
        else:
            merged_jobs.append(merge_duplicate_jobs(job_group))
            stats['duplicates_found'] += len(job_group) - 1

    # Step 4: fuzzy pass for near-duplicates the signatures missed.
    final_jobs: List[Dict[str, Any]] = []
    for job1 in merged_jobs:
        is_duplicate = False

        for j, job2 in enumerate(final_jobs):
            similarity = calculate_job_similarity(job1, job2)
            if similarity < similarity_threshold:
                continue

            print(f"🔍 发现相似职位 (相似度: {similarity:.3f}):")
            print(f"   Job1: {job1.get('title')} @ {job1.get('company_name')} [{job1.get('job_id')}]")
            print(f"   Job2: {job2.get('title')} @ {job2.get('company_name')} [{job2.get('job_id')}]")

            # NOTE(review): es_score (BM25-like, unbounded) and vector_score
            # (similarity, typically <= 1) are on different scales, so this
            # max() comparison effectively favors ES hits — confirm intended.
            job1_score = max(job1.get('es_score', 0), job1.get('vector_score', 0))
            job2_score = max(job2.get('es_score', 0), job2.get('vector_score', 0))

            # Keep the higher-scored job, folding the loser's provenance in.
            if job1_score > job2_score:
                final_jobs[j] = _merge_similar_pair(job1, job2)
                print(f"   ✅ 保留Job1 (分数更高: {job1_score:.3f} > {job2_score:.3f})")
            else:
                final_jobs[j] = _merge_similar_pair(job2, job1)
                print(f"   ✅ 保留Job2 (分数更高: {job2_score:.3f} >= {job1_score:.3f})")

            is_duplicate = True
            stats['similar_matches'] += 1
            break

        if not is_duplicate:
            final_jobs.append(job1)

    stats['final_count'] = len(final_jobs)

    print(f"✅ 高级去重完成:")
    print(f"   📊 原始职位: ES={stats['original_es_count']}, Milvus={stats['original_milvus_count']}")
    print(f"   🔄 精确重复: {stats['exact_matches']}组")
    print(f"   🔍 相似重复: {stats['similar_matches']}个")
    print(f"   📋 最终结果: {stats['final_count']}个职位")

    return final_jobs, stats


def merge_duplicate_jobs(job_group: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Collapse a group of identical jobs into a single record.

    The first entry supplies the base fields; each backend score is the
    maximum over the group, and the source flags/list are the union.
    When an Elasticsearch entry exists, its job_id is preferred (simple
    numeric format) and it becomes the primary source.

    Args:
        job_group: Jobs that share the same content signature.

    Returns:
        Dict: The merged job record.
    """
    if len(job_group) == 1:
        return job_group[0]

    # Base fields come from the first entry of the group.
    merged = dict(job_group[0])

    # Single pass: collect per-backend scores and all source labels.
    es_scores = []
    vector_scores = []
    combined_sources = []
    for entry in job_group:
        if entry.get('has_es', False):
            es_scores.append(entry.get('es_score', 0))
        if entry.get('has_vector', False):
            vector_scores.append(entry.get('vector_score', 0))
        combined_sources.extend(entry.get('sources', []))

    # Highest score per backend wins; flags reflect whether any entry
    # came from that backend.
    merged['es_score'] = max(es_scores, default=0)
    merged['vector_score'] = max(vector_scores, default=0)
    merged['has_es'] = bool(es_scores)
    merged['has_vector'] = bool(vector_scores)
    merged['sources'] = list(set(combined_sources))  # de-duplicated

    # Prefer the ES job_id (plain numeric) when one is available.
    es_entries = [entry for entry in job_group if entry.get('source') == 'elasticsearch']
    if es_entries:
        merged['job_id'] = es_entries[0]['job_id']
        merged['primary_source'] = 'elasticsearch'
    else:
        merged['primary_source'] = 'milvus'

    return merged


def test_deduplication():
    """Smoke-test the dedup pipeline on a small hand-crafted fixture.

    The ES and Milvus fixtures deliberately contain the same Shanghai
    Java posting under different job_ids ('3' vs '1_1'/'1_0'), so the
    pipeline has an actual duplicate to collapse.
    """
    # Simulated Elasticsearch hits.
    es_results = [
        {
            'job_id': '3',
            'title': 'JAVA工程师',
            'company_name': '融金汇银',
            'city_name': '上海',
            'money': '18000',
            'description': 'Java开发工作...',
            'es_score': 29.46,
        },
        {
            'job_id': '6',
            'title': 'JAVA工程师',
            'company_name': '融金汇银',
            'city_name': '西安',
            'money': '17000',
            'description': '微服务开发...',
            'es_score': 41.68,
        },
    ]

    # Simulated Milvus hits (includes duplicates of the ES posting).
    milvus_results = [
        {
            'job_id': '1_1',
            'title': 'JAVA工程师',
            'company_name': '融金汇银',
            'city_name': '上海',
            'money': '18000',
            'description': 'Java开发工作...',
            'vector_score': 0.564,
        },
        {
            'job_id': '1_0',
            'title': 'JAVA工程师',
            'company_name': '融金汇银',
            'city_name': '上海',
            'money': '18000',
            'description': 'Java开发工作...',
            'vector_score': 0.550,
        },
        {
            'job_id': '0_1',
            'title': 'Python工程师',
            'company_name': '融金汇银',
            'city_name': '北京',
            'money': '10000',
            'description': 'Python开发...',
            'vector_score': 0.509,
        },
    ]

    print("🧪 测试高级去重逻辑")
    print("=" * 60)

    final_jobs, stats = advanced_job_deduplication(es_results, milvus_results)

    # Dump the surviving jobs for manual inspection.
    print(f"\n📊 去重结果:")
    for rank, job in enumerate(final_jobs, 1):
        print(f"  【{rank}】{job['title']} @ {job['company_name']} - {job['city_name']}")
        print(f"      job_id: {job['job_id']}")
        print(f"      来源: {', '.join(job['sources'])}")
        print(f"      ES分数: {job['es_score']}, Vector分数: {job['vector_score']}")
        print()


# Script entry point: run the smoke test when executed directly.
if __name__ == "__main__":
    test_deduplication()
