import csv
import json
import logging
import os
import re
import statistics
import time
from dataclasses import dataclass, asdict
from datetime import datetime
from enum import Enum
from typing import Dict, List, Tuple, Optional

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

logger = logging.getLogger(__name__)

class QuestionType(Enum):
    """Categories of benchmark questions, used to group and report evaluation results."""
    FACTUAL = "factual"
    MULTI_HOP = "multi_hop"
    COMPOSITIONAL = "compositional"
    COMPARISON = "comparison"
    TEMPORAL = "temporal"
    NUMERICAL = "numerical"

@dataclass
class EvaluationMetrics:
    """Aggregated scores for a batch of experiment results.

    NOTE(review): ExperimentRunner.calculate_metrics repurposes
    precision/recall/f1_score to hold the exact-match, semantic-similarity
    and LLM-judge averages rather than classic IR precision/recall —
    confirm before reporting them under those names.
    """
    accuracy: float = 0.0          # mean composite score across results
    precision: float = 0.0         # mean exact-match score (see NOTE above)
    recall: float = 0.0            # mean semantic-similarity score (see NOTE above)
    f1_score: float = 0.0          # mean LLM-judge score (see NOTE above)
    response_time: float = 0.0     # mean wall-clock answer time, seconds
    reasoning_steps: int = 0       # not populated anywhere in this module
    confidence_score: float = 0.0  # mean self-reported confidence
    
class QuestionAnnotation:
    """A single annotated benchmark question with its gold answer and metadata."""

    def __init__(self, question: str, ground_truth: str, question_type: QuestionType,
                 difficulty: int = 1, required_hops: int = 1,
                 entities: Optional[List[str]] = None):
        """Create an annotated question.

        Args:
            question: Natural-language question text.
            ground_truth: Reference (gold) answer.
            question_type: Category of the question (see QuestionType).
            difficulty: Difficulty level on a 1-5 scale.
            required_hops: Number of reasoning hops needed to answer.
            entities: Entities involved in the question; defaults to an
                empty list (annotation fixed: the default is None, so the
                declared type is Optional[List[str]]).
        """
        self.question = question
        self.ground_truth = ground_truth
        self.question_type = question_type
        self.difficulty = difficulty  # 1-5 difficulty level
        self.required_hops = required_hops  # reasoning hops required
        # `or []` also replaces an explicitly-passed empty list with a fresh
        # one, which is harmless here and avoids sharing a mutable default.
        self.entities = entities or []
        self.created_at = datetime.now()

class BenchmarkDataset:
    """Container for annotated benchmark questions with JSON persistence."""

    def __init__(self):
        self.questions = []

    def add_question(self, annotation: QuestionAnnotation):
        """Append one annotated question to the dataset."""
        self.questions.append(annotation)

    def load_from_json(self, filepath: str):
        """Populate the dataset from a JSON file of question records.

        Errors are logged rather than raised; on failure the dataset is
        left with whatever was loaded so far.
        """
        try:
            with open(filepath, 'r', encoding='utf-8') as fh:
                records = json.load(fh)

            for record in records:
                self.add_question(QuestionAnnotation(
                    question=record['question'],
                    ground_truth=record['ground_truth'],
                    question_type=QuestionType(record['question_type']),
                    difficulty=record.get('difficulty', 1),
                    required_hops=record.get('required_hops', 1),
                    entities=record.get('entities', []),
                ))

            logger.info(f"Loaded {len(self.questions)} questions from {filepath}")

        except Exception as e:
            logger.error(f"Error loading dataset: {e}")

    def save_to_json(self, filepath: str):
        """Serialize every question to a JSON file (UTF-8, human-readable)."""
        try:
            payload = [
                {
                    'question': item.question,
                    'ground_truth': item.ground_truth,
                    'question_type': item.question_type.value,
                    'difficulty': item.difficulty,
                    'required_hops': item.required_hops,
                    'entities': item.entities,
                }
                for item in self.questions
            ]

            with open(filepath, 'w', encoding='utf-8') as fh:
                json.dump(payload, fh, ensure_ascii=False, indent=2)

            logger.info(f"Saved {len(self.questions)} questions to {filepath}")

        except Exception as e:
            logger.error(f"Error saving dataset: {e}")

    def get_questions_by_type(self, question_type: QuestionType) -> List[QuestionAnnotation]:
        """Return all questions of the given type."""
        return [item for item in self.questions if item.question_type == question_type]

    def get_questions_by_difficulty(self, difficulty: int) -> List[QuestionAnnotation]:
        """Return all questions at the given difficulty level."""
        return [item for item in self.questions if item.difficulty == difficulty]

class AnswerEvaluator:
    """Scores predicted answers against gold answers.

    Combines three signals into a weighted composite score:
      * exact string match (case/whitespace-insensitive),
      * embedding-based semantic similarity,
      * an LLM-as-judge quality score.
    """

    # Matches the first ```json fenced block. Compiled once (the original
    # recompiled per call) and tolerant of surrounding whitespace, so it
    # also accepts \r\n or trailing spaces around the JSON body while still
    # matching everything the stricter \n-only pattern did.
    _JSON_FENCE = re.compile(r"```json\s*(.*?)\s*```", re.DOTALL)

    def __init__(self, llm_model):
        # llm_model must expose call_embedding(a, b) and
        # call_llm_local(prompt, system_prompt), each returning a dict with
        # a "response_code" field (200 on success).
        self.llm_model = llm_model

    def evaluate_answer(self, predicted: str, ground_truth: str, question: str) -> Dict[str, float]:
        """Evaluate a single predicted answer.

        Returns:
            Dict with 'exact_match', 'semantic_similarity', 'llm_evaluation'
            and their weighted 'composite_score' (weights 0.4 / 0.3 / 0.3).
        """
        # Exact match after trimming and lower-casing both sides.
        exact_match = 1.0 if predicted.strip().lower() == ground_truth.strip().lower() else 0.0

        # Embedding-based semantic similarity.
        semantic_score = self._evaluate_semantic_similarity(predicted, ground_truth)

        # LLM-as-judge score.
        llm_score = self._evaluate_with_llm(question, predicted, ground_truth)

        return {
            'exact_match': exact_match,
            'semantic_similarity': semantic_score,
            'llm_evaluation': llm_score,
            'composite_score': (exact_match * 0.4 + semantic_score * 0.3 + llm_score * 0.3)
        }

    def _evaluate_semantic_similarity(self, predicted: str, ground_truth: str) -> float:
        """Embedding similarity between prediction and gold answer; 0.0 on any failure."""
        try:
            response = self.llm_model.call_embedding(predicted, ground_truth)
            if response["response_code"] == 200:
                return response["score"]
        except Exception as e:
            logger.error(f"Error calculating semantic similarity: {e}")

        return 0.0

    def _evaluate_with_llm(self, question: str, predicted: str, ground_truth: str) -> float:
        """Ask the LLM to judge answer quality; returns a score in [0, 1]."""
        system_prompt = """请评估预测答案相对于标准答案的质量。
        考虑以下因素：
        1. 事实准确性
        2. 完整性
        3. 相关性
        
        输出格式：
        ```json
        {"score": 0.85, "reasoning": "评估理由"}
        ```
        
        评分范围：0.0-1.0"""
        
        prompt = f"""【问题】
{question}

【标准答案】
{ground_truth}

【预测答案】
{predicted}

请评估预测答案的质量。"""
        
        try:
            response = self.llm_model.call_llm_local(prompt, system_prompt)
            if response["response_code"] == 200:
                json_data = self._extract_json(response["data"])
                if json_data and "score" in json_data:
                    return float(json_data["score"])
        except Exception as e:
            logger.error(f"Error in LLM evaluation: {e}")

        # Neutral default so one failed judge call doesn't zero the composite.
        return 0.5

    def _extract_json(self, text: str) -> Optional[Dict]:
        """Return the first ```json fenced block in *text* parsed as a dict, or None."""
        match = self._JSON_FENCE.search(text)
        if match:
            try:
                return json.loads(match.group(1))
            except json.JSONDecodeError:
                pass
        return None

class ExperimentRunner:
    """Runs benchmark experiments and generates evaluation reports.

    Drives the system under test over a BenchmarkDataset in one or more
    reasoning modes, scores every answer with an AnswerEvaluator, and can
    emit CSV files, a JSON statistical analysis, matplotlib charts and a
    LaTeX results table.
    """

    def __init__(self, system, evaluator: AnswerEvaluator):
        # `system` must expose answer_question(question, mode=..., max_iterations=...)
        # returning a dict with at least 'final_answer'.
        self.system = system
        self.evaluator = evaluator
        # {mode_value: [per-question result dicts]}; filled by run_experiment.
        # Fixed: was initialized as a list, but every report method calls
        # self.results.items(), so an empty dict is the correct empty state.
        self.results = {}

    def run_experiment(self, dataset: BenchmarkDataset,
                      modes: List = None, max_questions: int = None) -> Dict:
        """Run the full experiment.

        Args:
            dataset: Annotated questions to evaluate.
            modes: ReasoningMode values to compare; defaults to HYBRID,
                RETRIEVAL_ONLY and GRAPH_ONLY.
            max_questions: Optional cap on how many questions are used.

        Returns:
            Mapping of mode value -> list of per-question result dicts.
            Also stored on self.results for subsequent report generation.
        """
        if modes is None:
            # Imported lazily so this module can load without the system package.
            from improved_think_on_graph import ReasoningMode
            modes = [ReasoningMode.HYBRID, ReasoningMode.RETRIEVAL_ONLY, ReasoningMode.GRAPH_ONLY]

        questions = dataset.questions
        if max_questions:
            questions = questions[:max_questions]

        experiment_results = {}

        for mode in modes:
            logger.info(f"Running experiment with mode: {mode.value}")
            mode_results = []

            for i, q_annotation in enumerate(questions):
                logger.info(f"Processing question {i+1}/{len(questions)}")

                # Run reasoning and measure wall-clock latency.
                start_time = time.time()
                result = self.system.answer_question(
                    q_annotation.question,
                    mode=mode,
                    max_iterations=3
                )
                end_time = time.time()

                # Score the prediction against the gold answer.
                evaluation = self.evaluator.evaluate_answer(
                    result.get('final_answer', ''),
                    q_annotation.ground_truth,
                    q_annotation.question
                )

                # One flat record per question; evaluation scores merged in.
                experiment_result = {
                    'question_id': i,
                    'question': q_annotation.question,
                    'ground_truth': q_annotation.ground_truth,
                    'predicted_answer': result.get('final_answer', ''),
                    'question_type': q_annotation.question_type.value,
                    'difficulty': q_annotation.difficulty,
                    'required_hops': q_annotation.required_hops,
                    'reasoning_mode': mode.value,
                    'response_time': end_time - start_time,
                    'confidence': result.get('confidence', 0.0),
                    'reasoning_method': result.get('reasoning_method', ''),
                    **evaluation
                }

                mode_results.append(experiment_result)

            experiment_results[mode.value] = mode_results

        self.results = experiment_results
        return experiment_results

    def calculate_metrics(self, results: List[Dict]) -> EvaluationMetrics:
        """Aggregate per-question result dicts into EvaluationMetrics.

        NOTE: precision/recall/f1_score are repurposed to carry the
        exact-match, semantic-similarity and LLM-judge averages.
        Returns zeroed metrics for an empty result list.
        """
        if not results:
            return EvaluationMetrics()

        # Overall averages.
        accuracy = statistics.mean([r['composite_score'] for r in results])
        avg_response_time = statistics.mean([r['response_time'] for r in results])
        avg_confidence = statistics.mean([r['confidence'] for r in results])

        # Averages of the individual evaluation dimensions.
        exact_match_avg = statistics.mean([r['exact_match'] for r in results])
        semantic_avg = statistics.mean([r['semantic_similarity'] for r in results])
        llm_eval_avg = statistics.mean([r['llm_evaluation'] for r in results])

        return EvaluationMetrics(
            accuracy=accuracy,
            precision=exact_match_avg,
            recall=semantic_avg,
            f1_score=llm_eval_avg,
            response_time=avg_response_time,
            confidence_score=avg_confidence
        )

    def generate_report(self, output_dir: str = "experiment_results"):
        """Generate every report artifact (CSV, stats JSON, plots, LaTeX) in output_dir."""
        os.makedirs(output_dir, exist_ok=True)

        # CSV report per mode.
        self._generate_csv_report(output_dir)

        # Statistical analysis (overall + per question type).
        self._generate_statistical_analysis(output_dir)

        # Visualization charts.
        self._generate_visualizations(output_dir)

        # LaTeX results table.
        self._generate_latex_tables(output_dir)

        logger.info(f"Experiment report generated in {output_dir}")

    def _generate_csv_report(self, output_dir: str):
        """Write one CSV of raw per-question results per reasoning mode."""
        for mode, results in self.results.items():
            filename = f"{output_dir}/results_{mode}.csv"

            if results:
                df = pd.DataFrame(results)
                df.to_csv(filename, index=False)
                # Fixed: previously logged the literal placeholder "(unknown)"
                # instead of the actual filename.
                logger.info(f"CSV report saved: {filename}")

    def _generate_statistical_analysis(self, output_dir: str):
        """Write statistical_analysis.json: overall and per-question-type metrics per mode."""
        analysis = {}

        for mode, results in self.results.items():
            if results:
                metrics = self.calculate_metrics(results)

                # Break the metrics down by question type.
                type_analysis = {}
                for q_type in QuestionType:
                    type_results = [r for r in results if r['question_type'] == q_type.value]
                    if type_results:
                        type_metrics = self.calculate_metrics(type_results)
                        type_analysis[q_type.value] = asdict(type_metrics)

                analysis[mode] = {
                    'overall_metrics': asdict(metrics),
                    'by_question_type': type_analysis,
                    'total_questions': len(results)
                }

        with open(f"{output_dir}/statistical_analysis.json", 'w') as f:
            json.dump(analysis, f, indent=2)

    def _generate_visualizations(self, output_dir: str):
        """Write the four comparison charts as PNG files into output_dir."""
        plt.style.use('seaborn-v0_8')

        # Flatten {mode: [results]} into one list of records tagged with the mode.
        # Fixed: tag copies instead of mutating the dicts stored in self.results,
        # so later reports don't pick up a stray 'mode' column.
        all_results = []
        for mode, results in self.results.items():
            for result in results:
                all_results.append({**result, 'mode': mode})

        if not all_results:
            return

        df = pd.DataFrame(all_results)

        # 1. Accuracy per reasoning mode.
        plt.figure(figsize=(10, 6))
        accuracy_by_mode = df.groupby('mode')['composite_score'].mean()
        plt.bar(accuracy_by_mode.index, accuracy_by_mode.values)
        plt.title('Accuracy by Reasoning Mode')
        plt.ylabel('Accuracy')
        plt.xlabel('Reasoning Mode')
        plt.xticks(rotation=45)
        plt.tight_layout()
        plt.savefig(f'{output_dir}/accuracy_by_mode.png', dpi=300)
        plt.close()

        # 2. Accuracy per question type and mode.
        plt.figure(figsize=(12, 6))
        accuracy_by_type = df.groupby(['question_type', 'mode'])['composite_score'].mean().unstack()
        accuracy_by_type.plot(kind='bar', ax=plt.gca())
        plt.title('Accuracy by Question Type and Mode')
        plt.ylabel('Accuracy')
        plt.xlabel('Question Type')
        plt.xticks(rotation=45)
        plt.legend(title='Reasoning Mode')
        plt.tight_layout()
        plt.savefig(f'{output_dir}/accuracy_by_type_and_mode.png', dpi=300)
        plt.close()

        # 3. Response-time distribution per mode.
        plt.figure(figsize=(10, 6))
        df.boxplot(column='response_time', by='mode')
        plt.title('Response Time Distribution by Mode')
        plt.ylabel('Response Time (seconds)')
        plt.xlabel('Reasoning Mode')
        plt.tight_layout()
        plt.savefig(f'{output_dir}/response_time_distribution.png', dpi=300)
        plt.close()

        # 4. Confidence vs accuracy scatter plot.
        plt.figure(figsize=(10, 6))
        for mode in df['mode'].unique():
            mode_data = df[df['mode'] == mode]
            plt.scatter(mode_data['confidence'], mode_data['composite_score'],
                       label=mode, alpha=0.6)

        plt.xlabel('Confidence Score')
        plt.ylabel('Accuracy')
        plt.title('Confidence vs Accuracy')
        plt.legend()
        plt.tight_layout()
        plt.savefig(f'{output_dir}/confidence_vs_accuracy.png', dpi=300)
        plt.close()

    def _generate_latex_tables(self, output_dir: str):
        """Write results_table.tex: a booktabs table of the main metrics per mode."""
        latex_content = []

        # Main results table.
        latex_content.append("\\begin{table}[htbp]")
        latex_content.append("\\centering")
        latex_content.append("\\caption{Experimental Results Comparison}")
        latex_content.append("\\begin{tabular}{lccccc}")
        latex_content.append("\\toprule")
        latex_content.append("Method & Accuracy & Precision & Recall & F1 & Avg Time (s) \\\\")
        latex_content.append("\\midrule")

        for mode, results in self.results.items():
            if results:
                metrics = self.calculate_metrics(results)
                latex_content.append(
                    f"{mode.replace('_', ' ').title()} & "
                    f"{metrics.accuracy:.3f} & "
                    f"{metrics.precision:.3f} & "
                    f"{metrics.recall:.3f} & "
                    f"{metrics.f1_score:.3f} & "
                    f"{metrics.response_time:.2f} \\\\"
                )

        latex_content.append("\\bottomrule")
        latex_content.append("\\end{tabular}")
        latex_content.append("\\end{table}")

        with open(f"{output_dir}/results_table.tex", 'w') as f:
            f.write('\n'.join(latex_content))

def create_sample_dataset() -> BenchmarkDataset:
    """Build a small hand-written dataset for smoke-testing the pipeline."""
    dataset = BenchmarkDataset()

    dataset.add_question(QuestionAnnotation(
        question="Who is the father of Ada Lovelace?",
        ground_truth="Lord Byron",
        question_type=QuestionType.FACTUAL,
        difficulty=2,
        required_hops=1,
        entities=["Ada Lovelace", "Lord Byron"],
    ))
    dataset.add_question(QuestionAnnotation(
        question="What is the capital of the country where the Eiffel Tower is located?",
        ground_truth="Paris",
        question_type=QuestionType.MULTI_HOP,
        difficulty=3,
        required_hops=2,
        entities=["Eiffel Tower", "France", "Paris"],
    ))
    dataset.add_question(QuestionAnnotation(
        question="When was the first computer invented and by whom?",
        ground_truth="The first electronic computer ENIAC was invented in 1946 by John Presper Eckert Jr. and John William Mauchly",
        question_type=QuestionType.COMPOSITIONAL,
        difficulty=4,
        required_hops=2,
        entities=["ENIAC", "John Presper Eckert Jr.", "John William Mauchly"],
    ))

    return dataset

def run_full_evaluation():
    """Wire up the system, run the sample benchmark, and write a report.

    Returns:
        Raw experiment results keyed by reasoning mode.
    """
    # Imported here so the module can be loaded without the system package.
    from improved_think_on_graph import ThinkOnGraphSystem, ReasoningMode

    # System under test plus its evaluator and runner.
    system = ThinkOnGraphSystem()
    runner = ExperimentRunner(system, AnswerEvaluator(system.llm_model))

    # A small built-in dataset; swap in a loaded one for real runs.
    dataset = create_sample_dataset()

    # Compare two reasoning modes over at most three questions.
    results = runner.run_experiment(
        dataset,
        modes=[ReasoningMode.HYBRID, ReasoningMode.RETRIEVAL_ONLY],
        max_questions=3,
    )

    runner.generate_report("experiment_results")

    return results

if __name__ == "__main__":
    # Entry point: run the full sample evaluation and report completion.
    results = run_full_evaluation()
    print("Evaluation completed!")
