import json
import os
import time
import logging
from typing import Dict, List, Any, Optional, Tuple
from dataclasses import dataclass, asdict
from pathlib import Path
import pandas as pd
from collections import defaultdict
import re
import numpy as np
from datetime import datetime

# 导入原有的Think-on-Graph系统
from improved_think_on_graph import ThinkOnGraphSystem, ReasoningMode

# 配置评估日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('./logs/evaluation.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
eval_logger = logging.getLogger(__name__)

@dataclass
class EvaluationResult:
    """Evaluation result for a single question: the prediction, its scores,
    and (optionally) the full reasoning trace produced by the system."""
    question_id: str
    question: str
    predicted_answer: str
    ground_truth: List[str]  # a list so that multiple gold answers can be kept
    confidence: float
    reasoning_method: str
    execution_time: float  # wall-clock seconds spent answering this question
    success: bool  # non-empty prediction that is not an apology/failure message
    exact_match: float
    f1_score: float
    bleu_score: float
    rouge_l: float
    error_message: str = ""  # non-empty only when evaluation raised an exception
    
    # Full reasoning process and intermediate artifacts (None when unavailable)
    reasoning_steps: Optional[List[Dict]] = None
    graph_queries: Optional[List[str]] = None
    llm_responses: Optional[List[str]] = None

@dataclass
class DatasetEvaluationResult:
    """Aggregate evaluation result over a whole dataset."""
    dataset_name: str
    total_questions: int
    successful_predictions: int
    success_rate: float  # successful_predictions / total_questions
    average_confidence: float
    average_execution_time: float  # mean seconds per question
    exact_match_avg: float
    f1_avg: float
    bleu_avg: float
    rouge_l_avg: float
    reasoning_mode_distribution: Dict[str, int]  # reasoning_method -> count
    error_distribution: Dict[str, int]  # error-type prefix -> count

class DatasetLoader:
    """Loaders that normalize each supported benchmark file into a common
    question schema: {'id', 'question', 'answers', 'type', ...extras}."""
    
    @staticmethod
    def load_webqsp(file_path: str) -> List[Dict]:
        """Load a WebQSP file (flat JSON list, new format)."""
        with open(file_path, 'r', encoding='utf-8') as f:
            raw = json.load(f)
        
        loaded = []
        for record in raw:
            text = record.get('question', '')
            gold = record.get('answers', [])
            
            # Records missing either the question text or any answer are skipped.
            if not (text and gold):
                continue
            
            loaded.append({
                'id': record.get('id', ''),
                'question': text,
                'answers': gold,
                'type': 'multi_answer',
                'topic_entity': record.get('qid_topic_entity', {})
            })
        
        return loaded
    
    @staticmethod
    def load_creak(file_path: str) -> List[Dict]:
        """Load a CREAK file (JSON Lines; malformed lines are skipped)."""
        loaded = []
        with open(file_path, 'r', encoding='utf-8') as f:
            for idx, raw_line in enumerate(f):
                try:
                    record = json.loads(raw_line.strip())
                except json.JSONDecodeError:
                    continue
                loaded.append({
                    'id': record.get('id', f'creak_{idx}'),
                    'question': record.get('sentence', ''),
                    'answers': [str(record.get('label', ''))],
                    'type': 'classification'
                })
        return loaded
    
    @staticmethod
    def load_fever(file_path: str) -> List[Dict]:
        """Load a FEVER file (JSON Lines; malformed lines are skipped)."""
        loaded = []
        with open(file_path, 'r', encoding='utf-8') as f:
            for idx, raw_line in enumerate(f):
                try:
                    record = json.loads(raw_line.strip())
                except json.JSONDecodeError:
                    continue
                loaded.append({
                    'id': record.get('id', f'fever_{idx}'),
                    'question': record.get('claim', ''),
                    'answers': [record.get('label', 'NOT ENOUGH INFO')],
                    'type': 'verification',
                    'evidence': record.get('evidence', [])
                })
        return loaded
    
    @staticmethod
    def load_hotpotqa(file_path: str) -> List[Dict]:
        """Load a HotpotQA file (single JSON array)."""
        with open(file_path, 'r', encoding='utf-8') as f:
            raw = json.load(f)
        
        return [
            {
                'id': record.get('_id', ''),
                'question': record.get('question', ''),
                'answers': [record.get('answer', '')],
                'type': 'short_answer',
                'supporting_facts': record.get('supporting_facts', [])
            }
            for record in raw
        ]
    
    @staticmethod
    def load_qald(file_path: str) -> List[Dict]:
        """Load a QALD file, keeping only the English wording of each question."""
        with open(file_path, 'r', encoding='utf-8') as f:
            raw = json.load(f)
        
        loaded = []
        for record in raw.get('questions', []):
            # Take the first 'en' phrasing; entries without one are skipped.
            text = next(
                (q.get('string', '') for q in record.get('question', [])
                 if q.get('language') == 'en'),
                ''
            )
            if not text:
                continue
            
            # Fall back to a single empty answer when no gold answers exist.
            gold = record.get('answers', []) or ['']
            
            loaded.append({
                'id': record.get('id', ''),
                'question': text,
                'answers': gold,
                'type': 'sparql_based'
            })
        
        return loaded

class MetricsCalculator:
    """Static text-overlap metrics used to score predicted answers.

    Every metric takes the predicted string and a list of acceptable gold
    answers and returns the best score achieved against any gold answer.
    """
    
    @staticmethod
    def exact_match(predicted: str, ground_truth: List[str]) -> float:
        """Return 1.0 if any gold answer appears as a substring of the
        lowercased prediction, else 0.0.

        Note: deliberately a containment match (per the original design), so a
        verbose answer that merely mentions the gold string still counts.
        """
        predicted = predicted.strip().lower()
        for gt in ground_truth:
            if gt.strip().lower() in predicted:
                return 1.0
        return 0.0
    
    @staticmethod
    def f1_score(predicted: str, ground_truth: List[str]) -> float:
        """Token-set F1 against each gold answer; returns the maximum.

        Both strings are lowercased and reduced to word *sets*, so token order
        and duplicates are ignored.
        """
        def normalize_text(text):
            return set(re.findall(r'\b\w+\b', text.lower()))
        
        pred_tokens = normalize_text(predicted)
        max_f1 = 0.0
        
        for gt in ground_truth:
            gt_tokens = normalize_text(gt)
            
            # Both empty counts as a perfect match; a one-sided empty scores 0.
            if not pred_tokens and not gt_tokens:
                return 1.0
            if not pred_tokens or not gt_tokens:
                continue
            
            common_tokens = pred_tokens & gt_tokens
            # Both sets are non-empty here, so the divisions are safe.
            precision = len(common_tokens) / len(pred_tokens)
            recall = len(common_tokens) / len(gt_tokens)
            
            if precision + recall == 0:
                f1 = 0.0
            else:
                f1 = 2 * precision * recall / (precision + recall)
            
            max_f1 = max(max_f1, f1)
        
        return max_f1
    
    @staticmethod
    def bleu_score(predicted: str, ground_truth: List[str]) -> float:
        """Simplified sentence BLEU vs each gold answer; returns the maximum.

        Uses the geometric mean of 1- to 4-gram precisions with a brevity
        penalty. Like unsmoothed BLEU, any zero n-gram precision collapses
        the score to zero.
        """
        def get_ngrams(tokens, n):
            return [tuple(tokens[i:i+n]) for i in range(len(tokens)-n+1)]
        
        pred_tokens = predicted.lower().split()
        max_bleu = 0.0
        
        for gt in ground_truth:
            gt_tokens = gt.lower().split()
            
            if not pred_tokens or not gt_tokens:
                continue
            
            # Modified n-gram precisions for n = 1..4.
            precisions = []
            for n in range(1, 5):
                pred_ngrams = get_ngrams(pred_tokens, n)
                gt_ngrams = get_ngrams(gt_tokens, n)
                
                if not pred_ngrams:
                    precisions.append(0.0)
                    continue
                
                matches = sum(1 for ng in pred_ngrams if ng in gt_ngrams)
                precisions.append(matches / len(pred_ngrams))
            
            # Geometric mean of the precisions, then the brevity penalty.
            if all(p > 0 for p in precisions):
                bleu = np.exp(np.mean([np.log(p) for p in precisions]))
                bp = min(1.0, np.exp(1 - len(gt_tokens) / len(pred_tokens)))
                bleu *= bp
                max_bleu = max(max_bleu, bleu)
        
        return max_bleu
    
    @staticmethod
    def rouge_l(predicted: str, ground_truth: List[str]) -> float:
        """ROUGE-L F-measure (longest common subsequence) vs each gold answer;
        returns the maximum."""
        def lcs_length(x, y):
            # Classic O(m*n) dynamic-programming LCS length.
            m, n = len(x), len(y)
            dp = [[0] * (n + 1) for _ in range(m + 1)]
            
            for i in range(1, m + 1):
                for j in range(1, n + 1):
                    if x[i-1] == y[j-1]:
                        dp[i][j] = dp[i-1][j-1] + 1
                    else:
                        dp[i][j] = max(dp[i-1][j], dp[i][j-1])
            
            return dp[m][n]
        
        pred_tokens = predicted.lower().split()
        max_rouge = 0.0
        
        for gt in ground_truth:
            gt_tokens = gt.lower().split()
            
            if not pred_tokens and not gt_tokens:
                max_rouge = max(max_rouge, 1.0)
                continue
            if not pred_tokens or not gt_tokens:
                continue
            
            # Both token lists are non-empty here, so the divisions are safe
            # (the original's extra combined-length-zero check was unreachable).
            lcs_len = lcs_length(pred_tokens, gt_tokens)
            precision = lcs_len / len(pred_tokens)
            recall = lcs_len / len(gt_tokens)
            
            if precision + recall == 0:
                rouge = 0.0
            else:
                rouge = 2 * precision * recall / (precision + recall)
            
            max_rouge = max(max_rouge, rouge)
        
        return max_rouge

class ThinkOnGraphEvaluator:
    """Evaluation harness around ThinkOnGraphSystem.

    Loads a benchmark through DatasetLoader, answers every question with the
    underlying system, scores predictions with MetricsCalculator, and persists
    both incremental (JSONL) and aggregate (JSON/CSV) results.
    """
    
    def __init__(self, base_url: str = "http://172.20.0.89", temperature: float = 0.1):
        """Create the evaluator.

        Args:
            base_url: Base URL of the Think-on-Graph backend service.
            temperature: Sampling temperature forwarded to the system.
        """
        self.system = ThinkOnGraphSystem(base_url, temperature)
        self.metrics_calculator = MetricsCalculator()
        
        # Dataset key -> loader function. Keys must match what select_dataset()
        # returns so evaluate_dataset() can dispatch on the name.
        self.dataset_loaders = {
            'webqsp': DatasetLoader.load_webqsp,
            'creak': DatasetLoader.load_creak,
            'fever': DatasetLoader.load_fever,
            'hotpotqa': DatasetLoader.load_hotpotqa,
            'qald': DatasetLoader.load_qald
        }
    
    def display_available_datasets(self):
        """Print the list of supported dataset formats."""
        print("\n可用的数据集:")
        print("=" * 40)
        datasets = [
            ('webqsp', 'WebQSP - 基于知识图谱的问答'),
            ('creak', 'CREAK - 常识推理和判断'),
            ('fever', 'FEVER - 事实验证'),
            ('hotpotqa', 'HotpotQA - 多跳推理问答'),
            ('qald', 'QALD - SPARQL查询问答')
        ]
        
        for i, (key, desc) in enumerate(datasets, 1):
            print(f"{i}. {key}: {desc}")
        print("=" * 40)
    
    def select_dataset(self) -> Tuple[str, str]:
        """Interactively choose a dataset format and a data file.

        Returns:
            (dataset_name, data_path) where dataset_name is a key of
            self.dataset_loaders and data_path is the chosen data file.

        BUG FIX: the second prompt previously overwrote dataset_name with a
        file-list key (e.g. 'fever_1000') that is not a loader key, so
        evaluate_dataset() raised "Unsupported dataset" for most selections.
        The loader key chosen at the first prompt is now kept; the second
        prompt only selects which file to load.
        """
        self.display_available_datasets()
        
        dataset_mapping = {
            '1': 'webqsp',
            '2': 'creak', 
            '3': 'fever',
            '4': 'hotpotqa',
            '5': 'qald'
        }
        
        # First prompt: which loader/format to use.
        while True:
            choice = input("\n请选择要评估的数据集 (输入数字1-5): ").strip()
            if choice in dataset_mapping:
                dataset_name = dataset_mapping[choice]
                break
            else:
                print("无效选择，请输入1-5之间的数字")

        # Candidate data files shipped with the project.
        datasets_config = {
            'webqsp': './data/webqsp_test.json',
            'creak': './data/creak.json',
            'fever_1000': './data/fever_1000.json',
            'fever_1000_entities_azure': './data/fever_1000_entities_azure.json',
            'hotpotadv_dev': './data/hotpotadv_dev.json',
            'hotpotadv_entities_azure': './data/hotpotadv_entities_azure.json',
            'qald_10-en': './data/qald_10-en.json',
            'zero_shot_re': './data/Zero_Shot_RE.json'
        }

        # Present the files as a numbered list for easy selection.
        dataset_names = list(datasets_config.keys())

        print("\n请选择要加载的数据集编号：")
        for idx, name in enumerate(dataset_names, start=1):
            print(f"{idx}. {name}")

        # Second prompt: which file to load (does NOT change dataset_name).
        while True:
            try:
                choice = int(input("请输入编号（1-%d）：" % len(dataset_names)))
                if 1 <= choice <= len(dataset_names):
                    data_path = datasets_config[dataset_names[choice - 1]]
                    break
                else:
                    print(f"请输入 1 到 {len(dataset_names)} 之间的数字。")
            except ValueError:
                print("请输入有效的数字。")
        
        return dataset_name, data_path
    
    def evaluate_single_question(self, question_data: Dict, mode: ReasoningMode = ReasoningMode.HYBRID) -> EvaluationResult:
        """Answer one question with the system and score the prediction.

        Args:
            question_data: Normalized question dict ('id', 'question', 'answers').
            mode: Reasoning mode forwarded to the system.

        Returns:
            An EvaluationResult. On any exception a zero-score result with
            error_message set is returned instead of raising, so a single
            failure never aborts a dataset run.
        """
        question_id = question_data.get('id', 'unknown')
        question = question_data.get('question', '')
        ground_truth = question_data.get('answers', [''])
        
        eval_logger.info(f"Evaluating question {question_id}: {question[:100]}...")
        
        start_time = time.time()
        
        try:
            # Query the Think-on-Graph system.
            # NOTE(review): the keys read below assume answer_question() returns
            # a dict with 'final_answer', 'confidence', 'reasoning_method' and
            # trace fields — confirm against improved_think_on_graph.
            result = self.system.answer_question(question, mode=mode)
            
            execution_time = time.time() - start_time
            
            predicted_answer = result.get('final_answer', '')
            confidence = result.get('confidence', 0.0)
            reasoning_method = result.get('reasoning_method', 'unknown')
            
            # Keep the full reasoning trace for the detailed dump.
            reasoning_steps = result.get('reasoning_steps', [])
            graph_queries = result.get('graph_queries', [])
            llm_responses = result.get('llm_responses', [])
            
            # Score the prediction against all gold answers.
            exact_match = self.metrics_calculator.exact_match(predicted_answer, ground_truth)
            f1_score = self.metrics_calculator.f1_score(predicted_answer, ground_truth)
            bleu_score = self.metrics_calculator.bleu_score(predicted_answer, ground_truth)
            rouge_l = self.metrics_calculator.rouge_l(predicted_answer, ground_truth)
            
            # Successful = non-empty and not an apology/failure message
            # (the system prefixes those with '抱歉').
            success = len(predicted_answer.strip()) > 0 and not predicted_answer.startswith('抱歉')
            
            return EvaluationResult(
                question_id=question_id,
                question=question,
                predicted_answer=predicted_answer,
                ground_truth=ground_truth,
                confidence=confidence,
                reasoning_method=reasoning_method,
                execution_time=execution_time,
                success=success,
                exact_match=exact_match,
                f1_score=f1_score,
                bleu_score=bleu_score,
                rouge_l=rouge_l,
                reasoning_steps=reasoning_steps,
                graph_queries=graph_queries,
                llm_responses=llm_responses
            )
            
        except Exception as e:
            execution_time = time.time() - start_time
            eval_logger.error(f"Error evaluating question {question_id}: {str(e)}")
            
            return EvaluationResult(
                question_id=question_id,
                question=question,
                predicted_answer="",
                ground_truth=ground_truth,
                confidence=0.0,
                reasoning_method="error",
                execution_time=execution_time,
                success=False,
                exact_match=0.0,
                f1_score=0.0,
                bleu_score=0.0,
                rouge_l=0.0,
                error_message=str(e)
            )
    
    def evaluate_dataset(self, dataset_name: str, data_path: str, 
                        max_questions: Optional[int] = None,
                        mode: ReasoningMode = ReasoningMode.HYBRID) -> Tuple[DatasetEvaluationResult, List[EvaluationResult]]:
        """Evaluate every question of one dataset.

        Args:
            dataset_name: Key of self.dataset_loaders.
            data_path: File passed to the loader.
            max_questions: Optional hard cap applied before the interactive cap.
            mode: Reasoning mode forwarded to the system.

        Returns:
            (aggregate result, per-question results).

        Raises:
            ValueError: If dataset_name has no registered loader.
        """
        eval_logger.info(f"Starting evaluation of dataset: {dataset_name}")
        
        # Load and normalize the dataset.
        if dataset_name not in self.dataset_loaders:
            raise ValueError(f"Unsupported dataset: {dataset_name}")
        
        loader = self.dataset_loaders[dataset_name]
        questions = loader(data_path)
        
        if max_questions:
            questions = questions[:max_questions]
        
        eval_logger.info(f"Loaded {len(questions)} questions from {dataset_name}")
        
        # For large datasets, let the user interactively cap the run size.
        if len(questions) > 50:
            limit_input = input(f"\n数据集包含{len(questions)}个问题，是否要限制评估数量？(y/n): ").strip().lower()
            if limit_input == 'y':
                while True:
                    try:
                        max_q = int(input("请输入要评估的最大问题数量: "))
                        if 0 < max_q <= len(questions):
                            questions = questions[:max_q]
                            break
                        else:
                            print(f"请输入1到{len(questions)}之间的数字")
                    except ValueError:
                        print("请输入有效的数字")
        
        # Evaluate each question, streaming progress to the console.
        results = []
        for i, question_data in enumerate(questions):
            print(f"\n评估进度: {i+1}/{len(questions)}")
            print(f"问题: {question_data.get('question', '')[:100]}...")
            
            result = self.evaluate_single_question(question_data, mode)
            results.append(result)
            
            print(f"预测答案: {result.predicted_answer[:100]}...")
            print(f"执行时间: {result.execution_time:.2f}s")
            print(f"成功: {result.success}")
            
            # Persist each result immediately so partial runs are not lost.
            self._save_single_result(dataset_name, result, mode)
        
        # Compute the dataset-level summary.
        dataset_result = self._calculate_dataset_metrics(dataset_name, results)
        return dataset_result, results
    
    def _save_single_result(self, dataset_name: str, result: EvaluationResult, mode: ReasoningMode):
        """Append one result to the per-dataset JSONL file.

        Note: the directory/file names embed str(mode), which for an enum
        renders like 'ReasoningMode.HYBRID'.
        """
        # (removed an unused 'timestamp' local present in the original)
        results_dir = f"./results/{dataset_name}/{mode}"
        os.makedirs(results_dir, exist_ok=True)

        single_result_file = f"{results_dir}/{dataset_name}_single_results_{mode}.jsonl"
        
        # Append mode: one JSON object per line.
        with open(single_result_file, 'a', encoding='utf-8') as f:
            json.dump(asdict(result), f, ensure_ascii=False)
            f.write('\n')
    
    def _calculate_dataset_metrics(self, dataset_name: str, 
                                 results: List[EvaluationResult]) -> DatasetEvaluationResult:
        """Aggregate per-question results into dataset-level metrics."""
        total_questions = len(results)
        successful_predictions = sum(1 for r in results if r.success)
        
        # Guard the averages below against an empty result list.
        if total_questions == 0:
            return DatasetEvaluationResult(
                dataset_name=dataset_name,
                total_questions=0,
                successful_predictions=0,
                success_rate=0.0,
                average_confidence=0.0,
                average_execution_time=0.0,
                exact_match_avg=0.0,
                f1_avg=0.0,
                bleu_avg=0.0,
                rouge_l_avg=0.0,
                reasoning_mode_distribution={},
                error_distribution={}
            )
        
        # Mean of each per-question metric.
        avg_confidence = np.mean([r.confidence for r in results])
        avg_execution_time = np.mean([r.execution_time for r in results])
        exact_match_avg = np.mean([r.exact_match for r in results])
        f1_avg = np.mean([r.f1_score for r in results])
        bleu_avg = np.mean([r.bleu_score for r in results])
        rouge_l_avg = np.mean([r.rouge_l for r in results])
        
        # Distribution of reasoning methods actually used.
        reasoning_mode_dist = defaultdict(int)
        for r in results:
            reasoning_mode_dist[r.reasoning_method] += 1
        
        # Distribution of error types, keyed by the text before the first ':'.
        error_dist = defaultdict(int)
        for r in results:
            if r.error_message:
                error_type = r.error_message.split(':')[0] if ':' in r.error_message else r.error_message
                error_dist[error_type] += 1
        
        return DatasetEvaluationResult(
            dataset_name=dataset_name,
            total_questions=total_questions,
            successful_predictions=successful_predictions,
            success_rate=successful_predictions / total_questions,
            average_confidence=avg_confidence,
            average_execution_time=avg_execution_time,
            exact_match_avg=exact_match_avg,
            f1_avg=f1_avg,
            bleu_avg=bleu_avg,
            rouge_l_avg=rouge_l_avg,
            reasoning_mode_distribution=dict(reasoning_mode_dist),
            error_distribution=dict(error_dist)
        )
    
    def save_complete_results(self, dataset_name: str, dataset_result: DatasetEvaluationResult, 
                            detailed_results: List[EvaluationResult], 
                            output_dir: str = "./results"):
        """Write the summary JSON, the detailed JSON, and a flattened CSV.

        All three files are timestamped so repeated runs never overwrite
        each other.
        """
        os.makedirs(output_dir, exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        
        # Aggregate summary.
        summary_file = f"{output_dir}/{dataset_name}_summary_{timestamp}.json"
        with open(summary_file, 'w', encoding='utf-8') as f:
            json.dump(asdict(dataset_result), f, ensure_ascii=False, indent=2)
        
        # Detailed per-question results (including the full reasoning trace).
        detailed_file = f"{output_dir}/{dataset_name}_detailed_{timestamp}.json"
        with open(detailed_file, 'w', encoding='utf-8') as f:
            json.dump([asdict(r) for r in detailed_results], f, ensure_ascii=False, indent=2)
        
        # CSV for quick inspection; complex fields are stringified first.
        csv_file = f"{output_dir}/{dataset_name}_results_{timestamp}.csv"
        df_data = []
        for r in detailed_results:
            row = asdict(r)
            row['ground_truth'] = str(row['ground_truth'])
            row['reasoning_steps'] = str(row.get('reasoning_steps', ''))
            row['graph_queries'] = str(row.get('graph_queries', ''))
            row['llm_responses'] = str(row.get('llm_responses', ''))
            df_data.append(row)
        
        df = pd.DataFrame(df_data)
        df.to_csv(csv_file, index=False, encoding='utf-8')
        
        eval_logger.info(f"Complete results saved to {output_dir}")
        print(f"\n结果已保存到:")
        print(f"- 汇总结果: {summary_file}")
        print(f"- 详细结果: {detailed_file}")
        print(f"- CSV结果: {csv_file}")
    
    def run_single_dataset_evaluation(self):
        """Interactive end-to-end run: pick dataset and mode, evaluate, save.

        Returns:
            (dataset_result, detailed_results) on success, (None, None) on error.
        """
        print("Think-on-Graph 系统评估工具")
        print("=" * 50)
        
        # Pick the dataset format and data file.
        dataset_name, data_path = self.select_dataset()
        
        # Pick the reasoning mode.
        print("\n推理模式选择:")
        print("1. HYBRID (混合模式)")
        print("2. GRAPH_ONLY (仅图谱)")
        print("3. RETRIEVAL_ONLY (仅检索)")
        print("4. LLM_ONLY (仅大模型)")
        
        mode_mapping = {
            '1': ReasoningMode.HYBRID,
            '2': ReasoningMode.GRAPH_ONLY,
            '3': ReasoningMode.RETRIEVAL_ONLY,
            '4': ReasoningMode.LLM_ONLY
        }
        
        while True:
            mode_choice = input("请选择推理模式 (1-4): ").strip()
            if mode_choice in mode_mapping:
                mode = mode_mapping[mode_choice]
                break
            else:
                print("无效选择，请输入1-4")
        
        print(f"\n开始评估数据集: {dataset_name}")
        print(f"数据路径: {data_path}")
        print(f"推理模式: {mode}")
        print("=" * 50)
        
        try:
            # Run the evaluation.
            dataset_result, detailed_results = self.evaluate_dataset(
                dataset_name, data_path, mode=mode
            )
            
            # Persist all outputs.
            self.save_complete_results(dataset_name, dataset_result, detailed_results)
            
            # Print the summary.
            print(f"\n评估完成！")
            print("=" * 50)
            print(f"数据集: {dataset_name}")
            print(f"总问题数: {dataset_result.total_questions}")
            print(f"成功率: {dataset_result.success_rate:.3f}")
            print(f"精确匹配: {dataset_result.exact_match_avg:.3f}")
            print(f"F1分数: {dataset_result.f1_avg:.3f}")
            print(f"BLEU分数: {dataset_result.bleu_avg:.3f}")
            print(f"ROUGE-L: {dataset_result.rouge_l_avg:.3f}")
            print(f"平均置信度: {dataset_result.average_confidence:.3f}")
            print(f"平均执行时间: {dataset_result.average_execution_time:.2f}秒")
            print("=" * 50)
            
            return dataset_result, detailed_results
            
        except Exception as e:
            eval_logger.error(f"评估失败: {str(e)}")
            print(f"评估过程中出现错误: {str(e)}")
            return None, None

def main():
    """Entry point: prepare output directories and launch an interactive run."""
    # Make sure the output directories exist before anything writes to them.
    for directory in ("./logs", "./results"):
        os.makedirs(directory, exist_ok=True)
    
    try:
        # Build the evaluator and run one interactive dataset evaluation.
        evaluator = ThinkOnGraphEvaluator()
        evaluator.run_single_dataset_evaluation()
    except KeyboardInterrupt:
        print("\n\n评估被用户中断")
    except Exception as e:
        print(f"\n程序运行出错: {str(e)}")
        eval_logger.error(f"Program error: {str(e)}")

if __name__ == "__main__":
    main()