#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
参数优化工具
用于网格搜索最优参数组合以提高查重准确性
"""

import json
import random
from typing import List, Dict
import numpy as np
from core_elements_utils import extract_core_elements, calculate_jaccard_similarity, is_high_similarity_by_elements, filter_similar_titles
from paper_checker_chinese_optimized import text_to_vector, FaissStorage
import jieba


class ParameterOptimizer:
    """Grid-search tool for tuning title-similarity checking parameters.

    Builds a synthetic validation set of similar / dissimilar title pairs,
    scores every parameter combination against it (precision / recall / F1 /
    accuracy), and persists the best combination (by F1) to a JSON file.
    """

    def __init__(self):
        # Vector index used for similarity lookups; replaced with a fresh
        # instance at the start of every evaluation run (see evaluate_parameters).
        self.storage = FaissStorage()

    def create_validation_dataset(self) -> List[Dict]:
        """Create a labeled validation dataset of title pairs.

        Returns:
            A shuffled list of dicts with keys ``title1``, ``title2``,
            ``label`` (1 = similar, 0 = dissimilar) and ``reason``.
        """
        # Seed titles from which both similar and dissimilar pairs are derived.
        original_titles = [
            "基于深度学习的图像识别技术研究",
            "机器学习在医疗诊断中的应用分析",
            "云计算环境下的数据安全保护机制",
            "神经网络在自然语言处理中的应用",
            "大数据分析平台的设计与实现",
            "基于LSTM的电商评论情感分析",
            "采用CNN的医学图像分类研究",
            "基于BERT的文本相似度计算方法",
            "支持向量机在股票预测中的应用",
            "随机森林算法在信用评估中的研究"
        ]

        # Paraphrase transforms used to generate "similar" counterparts.
        variations = [
            lambda x: x.replace("研究", "分析"),
            lambda x: x.replace("应用", "实践"),
            lambda x: x.replace("技术", "方法"),
            lambda x: x.replace("设计", "构建"),
            lambda x: "基于" + x,
            lambda x: x + "的优化",
            lambda x: x + "的改进"
        ]

        # Similar pairs (label 1): each title paired with its paraphrases.
        # Capped at 500 — equivalent to the original length guard (with the
        # current 10 titles x 7 variations = 70 pairs, the cap is never hit).
        similar_pairs = [
            {
                "title1": title,
                "title2": variation_func(title),
                "label": 1,  # similar
                "reason": "语义相同但表述略有不同"
            }
            for title in original_titles
            for variation_func in variations
        ][:500]

        # Dissimilar pairs (label 0): two *distinct* randomly chosen titles.
        dissimilar_pairs = []
        for _ in range(500):
            # random.sample guarantees idx1 != idx2 without a retry loop.
            idx1, idx2 = random.sample(range(len(original_titles)), 2)
            dissimilar_pairs.append({
                "title1": original_titles[idx1],
                "title2": original_titles[idx2],
                "label": 0,  # dissimilar
                "reason": "主题完全不同"
            })

        # Merge and shuffle so labels are interleaved.
        validation_dataset = similar_pairs + dissimilar_pairs
        random.shuffle(validation_dataset)

        return validation_dataset

    def evaluate_single_pair(self, title1: str, title2: str, params: Dict) -> bool:
        """Decide whether a single pair of titles is similar under *params*.

        Args:
            title1: Query title (vectorized and searched against the index).
            title2: Candidate title expected to already be in the index.
            params: Optional keys ``use_core_elements``,
                ``check_methods_objects`` (both bool) and ``threshold``
                (float in [0, 1], default 0.6).

        Returns:
            True if the pair is judged similar; False otherwise, including
            on any internal error (logged and swallowed so one bad pair
            does not abort a whole evaluation run).
        """
        try:
            vector1 = text_to_vector(title1)

            # NOTE(review): top_k=1 means title2 is only "found" when it is
            # the single nearest neighbour of title1 — confirm this is the
            # intended evaluation protocol.
            similar_results = self.storage.search_similar(
                vector1,
                top_k=1,
                query_title=title1 if params.get('use_core_elements', False) else None
            )

            # Look for title2 among the search hits.
            found_similar = False
            max_similarity = 0
            for result in similar_results:
                if result["title"] == title2:
                    found_similar = True
                    max_similarity = result["similarity"]
                    break

            # Optional second-stage filter: require the core elements
            # (methods / objects) of both titles to agree.
            if params.get('use_core_elements', False) and params.get('check_methods_objects', False) and found_similar:
                if not is_high_similarity_by_elements(title1, title2):
                    found_similar = False  # core elements disagree -> treat as dissimilar

            # Threshold is given in [0, 1]; the stored similarity is assumed
            # to be a percentage in [0, 100] — TODO confirm against FaissStorage.
            is_similar = found_similar and max_similarity >= params.get('threshold', 0.6) * 100

            return is_similar
        except Exception as e:
            print(f"评估题目对 '{title1}' 和 '{title2}' 时出错: {e}")
            return False

    def evaluate_parameters(self, dataset: List[Dict], params: Dict) -> Dict:
        """Evaluate one parameter combination over *dataset*.

        Rebuilds the vector index from all unique titles in the dataset,
        classifies every pair via :meth:`evaluate_single_pair`, and returns
        confusion-matrix counts plus precision / recall / F1 / accuracy.
        """
        true_positives = 0
        false_positives = 0
        false_negatives = 0
        true_negatives = 0

        # Collect every unique title so each is indexed exactly once.
        unique_titles = set()
        for pair in dataset:
            unique_titles.add(pair["title1"])
            unique_titles.add(pair["title2"])

        # Reset the index by replacing it with a fresh instance.
        self.storage = FaissStorage()

        # Add all titles to the index.  (The previous title->id map was
        # never read anywhere, so it has been dropped.)
        for title in unique_titles:
            try:
                vector = text_to_vector(title)
                self.storage.add_vector(vector, title)
            except Exception as e:
                print(f"处理题目 '{title}' 时出错: {e}")

        # Classify each pair and tally the confusion matrix.
        for pair in dataset:
            title1, title2, label = pair["title1"], pair["title2"], pair["label"]

            is_similar = self.evaluate_single_pair(title1, title2, params)

            if label == 1 and is_similar:
                true_positives += 1
            elif label == 0 and is_similar:
                false_positives += 1
            elif label == 1 and not is_similar:
                false_negatives += 1
            elif label == 0 and not is_similar:
                true_negatives += 1

        # Derive metrics; every division is guarded explicitly, which also
        # covers the empty-dataset case (no exception handling needed).
        total = true_positives + false_positives + false_negatives + true_negatives
        precision = true_positives / (true_positives + false_positives) if (true_positives + false_positives) > 0 else 0
        recall = true_positives / (true_positives + false_negatives) if (true_positives + false_negatives) > 0 else 0
        f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0
        accuracy = (true_positives + true_negatives) / total if total > 0 else 0

        return {
            "true_positives": true_positives,
            "false_positives": false_positives,
            "false_negatives": false_negatives,
            "true_negatives": true_negatives,
            "precision": precision,
            "recall": recall,
            "f1_score": f1_score,
            "accuracy": accuracy
        }

    def grid_search_parameters(self, dataset: List[Dict]) -> Dict:
        """Exhaustively search the parameter grid and keep the best F1.

        Returns:
            Dict with keys ``best_params``, ``best_f1_score`` and
            ``best_results`` (the full metrics dict of the winner).
        """
        # Parameter search space.
        param_grid = {
            'use_core_elements': [True, False],
            'threshold': [0.5, 0.55, 0.6, 0.65, 0.7],
            'check_methods_objects': [True, False]
        }

        best_params = None
        best_f1 = -1  # any real F1 (>= 0) beats the sentinel
        best_results = None

        total_combinations = (len(param_grid['use_core_elements']) *
                             len(param_grid['threshold']) *
                             len(param_grid['check_methods_objects']))

        print(f"开始网格搜索，共 {total_combinations} 种参数组合...")

        for use_core_elements in param_grid['use_core_elements']:
            for threshold in param_grid['threshold']:
                for check_methods_objects in param_grid['check_methods_objects']:
                    params = {
                        'use_core_elements': use_core_elements,
                        'threshold': threshold,
                        'check_methods_objects': check_methods_objects
                    }

                    print(f"测试参数组合: use_core_elements={use_core_elements}, threshold={threshold}, check_methods_objects={check_methods_objects}")

                    results = self.evaluate_parameters(dataset, params)
                    f1 = results['f1_score']

                    print(f"  结果: Precision={results['precision']:.4f}, Recall={results['recall']:.4f}, F1={f1:.4f}")

                    if f1 > best_f1:
                        best_f1 = f1
                        best_params = params
                        best_results = results

        return {
            'best_params': best_params,
            'best_f1_score': best_f1,
            'best_results': best_results
        }

    def save_best_parameters(self, results: Dict, filename: str = "best_parameters.json"):
        """Persist the search results to *filename* as UTF-8 JSON."""
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(results, f, ensure_ascii=False, indent=2)
        # Bug fix: the message previously printed a literal "(unknown)"
        # instead of interpolating the actual filename.
        print(f"最优参数已保存到 {filename}")

    def load_best_parameters(self, filename: str = "best_parameters.json") -> Dict:
        """Load previously saved parameters.

        Returns:
            The parsed JSON dict, or ``{}`` when *filename* does not exist.
        """
        try:
            with open(filename, 'r', encoding='utf-8') as f:
                return json.load(f)
        except FileNotFoundError:
            # Bug fix: interpolate the actual filename (was a literal "(unknown)").
            print(f"未找到参数文件 {filename}")
            return {}


def main():
    """Drive the end-to-end parameter-optimization workflow."""
    optimizer = ParameterOptimizer()

    # Stage 1: build the synthetic labeled dataset.
    print("创建验证数据集...")
    dataset = optimizer.create_validation_dataset()
    print(f"验证数据集创建完成，共 {len(dataset)} 对题目")

    # Stage 2: exhaustive grid search over the parameter space.
    print("\n开始参数网格搜索...")
    search_outcome = optimizer.grid_search_parameters(dataset)

    # Stage 3: report the winning combination.
    print("\n=== 最优参数组合 ===")
    print(f"参数: {search_outcome['best_params']}")
    print(f"最佳F1分数: {search_outcome['best_f1_score']:.4f}")
    print(f"详细结果: {search_outcome['best_results']}")

    # Stage 4: persist the result for later reuse by the checker.
    optimizer.save_best_parameters(search_outcome)
    print("\n优化完成！")


if __name__ == "__main__":
    main()