import json
import os
import time
import logging
from typing import Dict, List, Any, Optional, Tuple
from dataclasses import dataclass, asdict
from pathlib import Path
import pandas as pd
from collections import defaultdict
import re
import numpy as np
from datetime import datetime

# 导入原有的Think-on-Graph系统
from improved_think_on_graph import ThinkOnGraphSystem, ReasoningMode

# Configure evaluation logging (file + console).
# Create the log directory up front: logging.FileHandler raises
# FileNotFoundError at import time if './logs' does not exist yet
# (main() only creates it much later).
os.makedirs('./logs', exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('./logs/evaluation.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
eval_logger = logging.getLogger(__name__)

@dataclass
class EvaluationResult:
    """Evaluation outcome for a single question.

    Holds the prediction, the gold answers (stringified), per-question
    overlap metrics, and timing/diagnostic information.
    """
    question_id: str
    question: str
    predicted_answer: str
    ground_truth: str
    confidence: float
    reasoning_method: str
    execution_time: float
    success: bool
    exact_match: float
    f1_score: float
    bleu_score: float
    rouge_l: float
    error_message: str = ""
    # Full response payload returned by the QA system (None when not captured).
    # Annotation fixed: a None default requires Optional[Dict], not Dict.
    full_response: Optional[Dict] = None

@dataclass
class DatasetEvaluationResult:
    """Aggregate evaluation result for one whole dataset run.

    Averages are over every evaluated question (including failures, which
    contribute zeroed metrics).
    """
    dataset_name: str
    total_questions: int
    successful_predictions: int
    # successful_predictions / total_questions
    success_rate: float
    average_confidence: float
    # Mean wall-clock seconds per question.
    average_execution_time: float
    exact_match_avg: float
    f1_avg: float
    bleu_avg: float
    rouge_l_avg: float
    # reasoning_method name -> number of questions answered with it.
    reasoning_mode_distribution: Dict[str, int]
    # error-type prefix -> occurrence count (empty when no errors).
    error_distribution: Dict[str, int]

class DatasetLoader:
    """Static loaders that normalize benchmark files into a common schema.

    Every loader returns a list of dicts with at least the keys
    'id', 'question', 'answers' (list of strings) and 'type'; some add
    dataset-specific extras ('evidence', 'supporting_facts').
    """

    @staticmethod
    def load_webqsp(file_path: str) -> List[Dict]:
        """Load the WebQSP dataset (one JSON object with a 'Questions' list)."""
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        questions = []
        for item in data.get('Questions', []):
            # Prefer the processed question text; fall back to the raw one.
            question_text = item.get('ProcessedQuestion', item.get('RawQuestion', ''))

            # Keep answers only from parses that carry an inferential chain.
            answers = []
            for parse in item.get('Parses', []):
                if parse.get('InferentialChain'):
                    for answer in parse.get('Answers', []):
                        if isinstance(answer, dict):
                            answers.append(answer.get('AnswerArgument', ''))
                        else:
                            answers.append(str(answer))

            # Questions with no text or no answers are dropped.
            if question_text and answers:
                questions.append({
                    'id': item.get('QuestionId', ''),
                    'question': question_text,
                    'answers': list(set(answers)),  # de-duplicate
                    'type': 'multi_answer'
                })

        return questions

    @staticmethod
    def load_creak(file_path: str) -> List[Dict]:
        """Load the CREAK dataset (JSONL, one claim per line)."""
        questions = []
        with open(file_path, 'r', encoding='utf-8') as f:
            for line_num, line in enumerate(f):
                try:
                    item = json.loads(line.strip())
                    questions.append({
                        'id': item.get('id', f'creak_{line_num}'),
                        'question': item.get('sentence', ''),
                        'answers': [str(item.get('label', ''))],
                        'type': 'classification'
                    })
                except json.JSONDecodeError:
                    # Skip malformed lines rather than aborting the whole load.
                    continue

        return questions

    @staticmethod
    def load_fever(file_path: str) -> List[Dict]:
        """Load the FEVER dataset (JSONL, one claim per line)."""
        questions = []
        with open(file_path, 'r', encoding='utf-8') as f:
            for line_num, line in enumerate(f):
                try:
                    item = json.loads(line.strip())
                    questions.append({
                        'id': item.get('id', f'fever_{line_num}'),
                        'question': item.get('claim', ''),
                        'answers': [item.get('label', 'NOT ENOUGH INFO')],
                        'type': 'verification',
                        'evidence': item.get('evidence', [])
                    })
                except json.JSONDecodeError:
                    # Skip malformed lines rather than aborting the whole load.
                    continue

        return questions

    @staticmethod
    def load_hotpotqa(file_path: str) -> List[Dict]:
        """Load a HotpotQA-style file (JSON array of question objects)."""
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        questions = []
        for item in data:
            questions.append({
                'id': item.get('_id', ''),
                'question': item.get('question', ''),
                'answers': [item.get('answer', '')],
                'type': 'short_answer',
                'supporting_facts': item.get('supporting_facts', [])
            })

        return questions

    @staticmethod
    def load_qald(file_path: str) -> List[Dict]:
        """Load the QALD dataset, keeping only English question strings."""
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        questions = []
        for item in data.get('questions', []):
            # Pick the English variant of the multilingual question.
            question_text = ""
            for q in item.get('question', []):
                if q.get('language') == 'en':
                    question_text = q.get('string', '')
                    break

            # QALD ships a SPARQL query per question, but only the
            # precomputed 'answers' field is used here; deriving answers by
            # executing the SPARQL is not implemented.
            answers = []
            if item.get('query', {}):
                answers = item.get('answers', [])
                if not answers:
                    answers = ['']  # no gold answer available

            if question_text:
                questions.append({
                    'id': item.get('id', ''),
                    'question': question_text,
                    'answers': answers if answers else [''],
                    'type': 'sparql_based'
                })

        return questions

class MetricsCalculator:
    """Text-overlap metrics.

    Each metric takes one predicted string and a list of gold answers and
    returns the best score across the gold list (range [0, 1]).
    """

    @staticmethod
    def exact_match(predicted: str, ground_truth: List[str]) -> float:
        """Return 1.0 if the prediction equals any gold answer after
        stripping and lowercasing, else 0.0."""
        predicted = predicted.strip().lower()
        for gt in ground_truth:
            if predicted == gt.strip().lower():
                return 1.0
        return 0.0

    @staticmethod
    def f1_score(predicted: str, ground_truth: List[str]) -> float:
        """Token-set F1, maximized over the gold answers."""
        def normalize_text(text):
            # Lowercased word tokens as a set (duplicates are ignored).
            return set(re.findall(r'\b\w+\b', text.lower()))

        pred_tokens = normalize_text(predicted)
        max_f1 = 0.0

        for gt in ground_truth:
            gt_tokens = normalize_text(gt)

            # Both empty counts as a perfect match; exactly one empty scores 0.
            if not pred_tokens and not gt_tokens:
                return 1.0
            if not pred_tokens or not gt_tokens:
                continue

            common_tokens = pred_tokens & gt_tokens
            precision = len(common_tokens) / len(pred_tokens)
            recall = len(common_tokens) / len(gt_tokens)

            if precision + recall == 0:
                f1 = 0.0
            else:
                f1 = 2 * precision * recall / (precision + recall)

            max_f1 = max(max_f1, f1)

        return max_f1

    @staticmethod
    def bleu_score(predicted: str, ground_truth: List[str]) -> float:
        """Simplified BLEU-4 (geometric mean of 1..4-gram precisions times a
        brevity penalty), maximized over the gold answers.

        NOTE: no smoothing is applied, so any zero n-gram precision zeroes
        the whole score -- predictions shorter than 4 tokens always get 0.
        """
        def get_ngrams(tokens, n):
            return [tuple(tokens[i:i+n]) for i in range(len(tokens)-n+1)]

        pred_tokens = predicted.lower().split()
        max_bleu = 0.0

        for gt in ground_truth:
            gt_tokens = gt.lower().split()

            if not pred_tokens or not gt_tokens:
                continue

            # n-gram precisions for n = 1..4.
            precisions = []
            for n in range(1, 5):
                pred_ngrams = get_ngrams(pred_tokens, n)
                gt_ngrams = get_ngrams(gt_tokens, n)

                if not pred_ngrams:
                    precisions.append(0.0)
                    continue

                matches = sum(1 for ng in pred_ngrams if ng in gt_ngrams)
                precisions.append(matches / len(pred_ngrams))

            if all(p > 0 for p in precisions):
                # Geometric mean computed in log space.
                bleu = np.exp(np.mean([np.log(p) for p in precisions]))
                # Brevity penalty: penalize only predictions shorter than
                # the reference (the min() caps the factor at 1).
                bp = min(1.0, np.exp(1 - len(gt_tokens) / len(pred_tokens)))
                max_bleu = max(max_bleu, bleu * bp)

        return max_bleu

    @staticmethod
    def rouge_l(predicted: str, ground_truth: List[str]) -> float:
        """ROUGE-L F1 (longest common subsequence), maximized over golds."""
        def lcs_length(x, y):
            # Classic O(m*n) dynamic-programming LCS length.
            m, n = len(x), len(y)
            dp = [[0] * (n + 1) for _ in range(m + 1)]

            for i in range(1, m + 1):
                for j in range(1, n + 1):
                    if x[i-1] == y[j-1]:
                        dp[i][j] = dp[i-1][j-1] + 1
                    else:
                        dp[i][j] = max(dp[i-1][j], dp[i][j-1])

            return dp[m][n]

        pred_tokens = predicted.lower().split()
        max_rouge = 0.0

        for gt in ground_truth:
            gt_tokens = gt.lower().split()

            # Both empty counts as a perfect match; exactly one empty scores 0.
            if not pred_tokens and not gt_tokens:
                max_rouge = max(max_rouge, 1.0)
                continue
            if not pred_tokens or not gt_tokens:
                continue

            # Both token lists are non-empty past the guards above, so the
            # divisions below are safe (dead-code zero-length checks removed).
            lcs_len = lcs_length(pred_tokens, gt_tokens)
            precision = lcs_len / len(pred_tokens)
            recall = lcs_len / len(gt_tokens)

            if precision + recall == 0:
                rouge = 0.0
            else:
                rouge = 2 * precision * recall / (precision + recall)

            max_rouge = max(max_rouge, rouge)

        return max_rouge

class ThinkOnGraphEvaluator:
    """Drives a ThinkOnGraphSystem over benchmark datasets and scores the
    answers with exact-match / F1 / BLEU / ROUGE-L metrics."""

    def __init__(self, base_url: str = "http://172.20.0.89", temperature: float = 0.1):
        """Create the underlying QA system and register dataset loaders.

        Args:
            base_url: Base URL of the Think-on-Graph backend service.
            temperature: Sampling temperature forwarded to the system.
        """
        self.system = ThinkOnGraphSystem(base_url, temperature)
        self.metrics_calculator = MetricsCalculator()
        self.results = []

        # Dataset name -> loader function.
        self.dataset_loaders = {
            'webqsp': DatasetLoader.load_webqsp,
            'creak': DatasetLoader.load_creak,
            'fever_1000': DatasetLoader.load_fever,
            'fever_1000_entities_azure': DatasetLoader.load_fever,
            'hotpotadv_dev': DatasetLoader.load_hotpotqa,
            'hotpotadv_entities_azure': DatasetLoader.load_hotpotqa,
            'qald_10-en': DatasetLoader.load_qald
        }

    def get_available_datasets(self) -> List[str]:
        """Return the names of the datasets this evaluator can load."""
        return list(self.dataset_loaders.keys())

    def evaluate_single_question(self, question_data: Dict, mode: ReasoningMode = ReasoningMode.HYBRID) -> EvaluationResult:
        """Run one question through the system and score the prediction.

        Never raises: any exception is captured in the returned result's
        `error_message` with zeroed metrics and success=False.
        """
        question_id = question_data.get('id', 'unknown')
        question = question_data.get('question', '')
        ground_truth = question_data.get('answers', [''])

        eval_logger.info(f"Evaluating question {question_id}: {question[:100]}...")

        start_time = time.time()

        try:
            # Query the Think-on-Graph system.
            result = self.system.answer_question(question, mode=mode)

            execution_time = time.time() - start_time

            predicted_answer = result.get('final_answer', '')
            confidence = result.get('confidence', 0.0)
            reasoning_method = result.get('reasoning_method', 'unknown')

            # Score the prediction against all gold answers.
            exact_match = self.metrics_calculator.exact_match(predicted_answer, ground_truth)
            f1_score = self.metrics_calculator.f1_score(predicted_answer, ground_truth)
            bleu_score = self.metrics_calculator.bleu_score(predicted_answer, ground_truth)
            rouge_l = self.metrics_calculator.rouge_l(predicted_answer, ground_truth)

            # A prediction counts as successful when it is non-empty and is
            # not the system's apology/refusal message ('抱歉' = "sorry").
            success = len(predicted_answer.strip()) > 0 and not predicted_answer.startswith('抱歉')

            return EvaluationResult(
                question_id=question_id,
                question=question,
                predicted_answer=predicted_answer,
                ground_truth=str(ground_truth),
                confidence=confidence,
                reasoning_method=reasoning_method,
                execution_time=execution_time,
                success=success,
                exact_match=exact_match,
                f1_score=f1_score,
                bleu_score=bleu_score,
                rouge_l=rouge_l,
                full_response=result  # keep the complete answer payload
            )

        except Exception as e:
            execution_time = time.time() - start_time
            eval_logger.error(f"Error evaluating question {question_id}: {str(e)}")

            return EvaluationResult(
                question_id=question_id,
                question=question,
                predicted_answer="",
                ground_truth=str(ground_truth),
                confidence=0.0,
                reasoning_method="error",
                execution_time=execution_time,
                success=False,
                exact_match=0.0,
                f1_score=0.0,
                bleu_score=0.0,
                rouge_l=0.0,
                error_message=str(e),
                full_response={"error": str(e)}
            )

    def evaluate_dataset(self, dataset_name: str, data_path: str, 
                        max_questions: Optional[int] = None,
                        mode: ReasoningMode = ReasoningMode.HYBRID,
                        start_index: int = 0) -> Tuple[DatasetEvaluationResult, List[EvaluationResult]]:
        """Evaluate one dataset end to end.

        Args:
            dataset_name: Key into the registered loaders.
            data_path: Path to the dataset file.
            max_questions: Cap on the number of questions (None/0 = no cap).
            mode: Reasoning mode forwarded to the system.
            start_index: Skip this many questions (for resuming a run).

        Returns:
            (aggregate result, per-question results).

        Raises:
            ValueError: If `dataset_name` has no registered loader.
        """
        eval_logger.info(f"Starting evaluation of dataset: {dataset_name}")

        # Resolve the loader for this dataset.
        if dataset_name not in self.dataset_loaders:
            raise ValueError(f"Unsupported dataset: {dataset_name}. Available datasets: {list(self.dataset_loaders.keys())}")

        loader = self.dataset_loaders[dataset_name]
        questions = loader(data_path)

        # Apply the resume offset and the question cap.
        if start_index > 0:
            questions = questions[start_index:]

        if max_questions:
            questions = questions[:max_questions]

        eval_logger.info(f"Loaded {len(questions)} questions from {dataset_name} (starting from index {start_index})")

        # Evaluate each question sequentially.
        results = []
        for i, question_data in enumerate(questions):
            actual_index = start_index + i
            eval_logger.info(f"Progress: {i+1}/{len(questions)} (Overall index: {actual_index})")

            result = self.evaluate_single_question(question_data, mode)
            results.append(result)

            # Checkpoint intermediate results every 5 questions.
            if (i + 1) % 5 == 0:
                self._save_intermediate_results(dataset_name, results, actual_index - i)

        # Aggregate the per-question results.
        dataset_result = self._calculate_dataset_metrics(dataset_name, results)

        return dataset_result, results

    def _calculate_dataset_metrics(self, dataset_name: str, 
                                 results: List[EvaluationResult]) -> DatasetEvaluationResult:
        """Aggregate per-question results into dataset-level metrics."""
        total_questions = len(results)
        successful_predictions = sum(1 for r in results if r.success)

        # Empty run: return an all-zero summary instead of dividing by zero.
        if total_questions == 0:
            return DatasetEvaluationResult(
                dataset_name=dataset_name,
                total_questions=0,
                successful_predictions=0,
                success_rate=0.0,
                average_confidence=0.0,
                average_execution_time=0.0,
                exact_match_avg=0.0,
                f1_avg=0.0,
                bleu_avg=0.0,
                rouge_l_avg=0.0,
                reasoning_mode_distribution={},
                error_distribution={}
            )

        # Mean metrics over every question (failed ones contribute zeros).
        avg_confidence = np.mean([r.confidence for r in results])
        avg_execution_time = np.mean([r.execution_time for r in results])
        exact_match_avg = np.mean([r.exact_match for r in results])
        f1_avg = np.mean([r.f1_score for r in results])
        bleu_avg = np.mean([r.bleu_score for r in results])
        rouge_l_avg = np.mean([r.rouge_l for r in results])

        # Distribution of reasoning methods actually used.
        reasoning_mode_dist = defaultdict(int)
        for r in results:
            reasoning_mode_dist[r.reasoning_method] += 1

        # Distribution of error types, keyed by the text before the first ':'.
        error_dist = defaultdict(int)
        for r in results:
            if r.error_message:
                error_type = r.error_message.split(':')[0] if ':' in r.error_message else r.error_message
                error_dist[error_type] += 1

        return DatasetEvaluationResult(
            dataset_name=dataset_name,
            total_questions=total_questions,
            successful_predictions=successful_predictions,
            success_rate=successful_predictions / total_questions,
            average_confidence=avg_confidence,
            average_execution_time=avg_execution_time,
            exact_match_avg=exact_match_avg,
            f1_avg=f1_avg,
            bleu_avg=bleu_avg,
            rouge_l_avg=rouge_l_avg,
            reasoning_mode_distribution=dict(reasoning_mode_dist),
            error_distribution=dict(error_dist)
        )

    def _save_intermediate_results(self, dataset_name: str, results: List[EvaluationResult], start_index: int = 0):
        """Write a timestamped JSON checkpoint of the results so far."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"./results/{dataset_name}_intermediate_{start_index}_{timestamp}.json"

        os.makedirs("./results", exist_ok=True)

        # Serialize each result, including the full response payload.
        results_with_full_response = []
        for r in results:
            result_dict = asdict(r)
            results_with_full_response.append(result_dict)

        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(results_with_full_response, f, ensure_ascii=False, indent=2)

        # Fixed: the original log line never interpolated the filename.
        eval_logger.info(f"Intermediate results saved to {filename}")

    def save_results(self, dataset_result: DatasetEvaluationResult, 
                    detailed_results: List[EvaluationResult], 
                    output_dir: str = "./results",
                    start_index: int = 0):
        """Persist the evaluation outputs in four timestamped files:
        summary JSON, detailed JSON, answers-only JSON, and a flat CSV."""
        os.makedirs(output_dir, exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        # Aggregate summary.
        summary_file = f"{output_dir}/{dataset_result.dataset_name}_summary_{start_index}_{timestamp}.json"
        with open(summary_file, 'w', encoding='utf-8') as f:
            json.dump(asdict(dataset_result), f, ensure_ascii=False, indent=2)

        # Per-question details (including the full response payloads).
        detailed_file = f"{output_dir}/{dataset_result.dataset_name}_detailed_{start_index}_{timestamp}.json"
        detailed_results_dict = []
        for r in detailed_results:
            result_dict = asdict(r)
            detailed_results_dict.append(result_dict)

        with open(detailed_file, 'w', encoding='utf-8') as f:
            json.dump(detailed_results_dict, f, ensure_ascii=False, indent=2)

        # Answers-only view: a compact projection of each result.
        answers_file = f"{output_dir}/{dataset_result.dataset_name}_answers_{start_index}_{timestamp}.json"
        answers_data = []
        for r in detailed_results:
            answers_data.append({
                'question_id': r.question_id,
                'question': r.question,
                'predicted_answer': r.predicted_answer,
                'ground_truth': r.ground_truth,
                'full_response': r.full_response,
                'success': r.success,
                'exact_match': r.exact_match,
                'f1_score': r.f1_score,
                'confidence': r.confidence,
                'execution_time': r.execution_time
            })

        with open(answers_file, 'w', encoding='utf-8') as f:
            json.dump(answers_data, f, ensure_ascii=False, indent=2)

        # CSV export for spreadsheet analysis.
        csv_file = f"{output_dir}/{dataset_result.dataset_name}_results_{start_index}_{timestamp}.csv"
        df_data = []
        for r in detailed_results:
            row = asdict(r)
            # full_response is a nested dict; flatten it to a JSON string
            # so the CSV cell stays a single value.
            row['full_response'] = json.dumps(row['full_response'], ensure_ascii=False) if row['full_response'] else ""
            df_data.append(row)

        df = pd.DataFrame(df_data)
        df.to_csv(csv_file, index=False, encoding='utf-8')

        eval_logger.info(f"Results saved to {output_dir}")
        eval_logger.info(f"Summary: {summary_file}")
        eval_logger.info(f"Detailed: {detailed_file}")
        eval_logger.info(f"Answers only: {answers_file}")
        eval_logger.info(f"CSV: {csv_file}")

def select_dataset_interactive():
    """Prompt the user to pick one of the known datasets.

    Returns (dataset_name, data_path), or (None, None) when the user
    cancels with Ctrl-C. Re-prompts on invalid input or a missing file.
    """
    datasets_config = {
        'webqsp': './data/webqsp_test.json',
        'creak': './data/creak.json',
        'fever_1000': './data/fever_1000.json',
        'fever_1000_entities_azure': './data/fever_1000_entities_azure.json',
        'hotpotadv_dev': './data/hotpotadv_dev.json',
        'hotpotadv_entities_azure': './data/hotpotadv_entities_azure.json',
        'qald_10-en': './data/qald_10-en.json',
    }

    dataset_names = list(datasets_config.keys())

    print("\n可用的数据集:")
    print("=" * 50)
    for number, name in enumerate(dataset_names, start=1):
        print(f"{number}. {name}")

    while True:
        # Read and parse the menu selection; re-prompt on bad input.
        try:
            raw = input(f"\n请选择要评估的数据集 (1-{len(datasets_config)}): ").strip()
            selection = int(raw) - 1
        except ValueError:
            print("请输入数字!")
            continue
        except KeyboardInterrupt:
            print("\n\n用户取消操作")
            return None, None

        if not 0 <= selection < len(dataset_names):
            print("无效的选择，请重新输入!")
            continue

        chosen = dataset_names[selection]
        path = datasets_config[chosen]

        # The dataset file must exist before the evaluation can start.
        if not os.path.exists(path):
            print(f"警告: 数据文件 {path} 不存在!")
            continue

        return chosen, path

def get_evaluation_parameters():
    """Prompt the user for run parameters.

    Returns (max_questions, start_index, mode); empty or invalid input
    falls back to the defaults (100 questions, index 0, HYBRID mode).
    """
    print("\n评估参数设置:")
    print("=" * 30)

    def read_int(prompt, default):
        # Empty input means "use the default"; non-numeric input warns
        # and also falls back to the default.
        raw = input(prompt).strip()
        if not raw:
            return default
        try:
            return int(raw)
        except ValueError:
            print(f"无效输入，使用默认值{default}")
            return default

    max_questions = read_int("最大评估问题数 (直接回车使用默认值100): ", 100)
    start_index = read_int("起始索引 (直接回车从0开始): ", 0)

    print("\n推理模式:")
    print("1. HYBRID (混合模式)")
    print("2. RETRIEVAL_ONLY (直接模式)")
    print("3. GRAPH_ONLY (基于图的模式)")

    mode_choice = input("选择推理模式 (直接回车使用默认HYBRID): ").strip()
    mode = {
        '1': ReasoningMode.HYBRID,
        '2': ReasoningMode.RETRIEVAL_ONLY,
        '3': ReasoningMode.GRAPH_ONLY,
    }.get(mode_choice, ReasoningMode.HYBRID)

    return max_questions, start_index, mode

def main():
    """Interactive entry point: pick a dataset, gather run parameters,
    evaluate, persist the results, and print a summary report."""
    print("Think-on-Graph 数据集评估工具")
    print("=" * 50)

    # Make sure the output directories exist before anything logs or saves.
    for directory in ("./logs", "./results"):
        os.makedirs(directory, exist_ok=True)

    # Dataset selection (None means the user cancelled).
    dataset_name, data_path = select_dataset_interactive()
    if not dataset_name:
        return

    max_questions, start_index, mode = get_evaluation_parameters()

    # Echo the configuration back for confirmation.
    print(f"\n评估配置:")
    print(f"数据集: {dataset_name}")
    print(f"数据路径: {data_path}")
    print(f"最大问题数: {max_questions}")
    print(f"起始索引: {start_index}")
    print(f"推理模式: {mode}")

    if input("\n确认开始评估? (y/N): ").strip().lower() != 'y':
        print("评估已取消")
        return

    # Build the evaluator (this connects to the backend system).
    try:
        evaluator = ThinkOnGraphEvaluator()
    except Exception as e:
        print(f"创建评估器失败: {e}")
        return

    try:
        print(f"\n开始评估数据集: {dataset_name}")
        print("=" * 50)

        summary, details = evaluator.evaluate_dataset(
            dataset_name=dataset_name,
            data_path=data_path,
            max_questions=max_questions,
            mode=mode,
            start_index=start_index
        )

        evaluator.save_results(summary, details, start_index=start_index)

        # Summary report.
        print(f"\n评估完成!")
        print("=" * 50)
        print(f"数据集: {dataset_name}")
        print(f"总问题数: {summary.total_questions}")
        print(f"成功率: {summary.success_rate:.3f}")
        print(f"精确匹配: {summary.exact_match_avg:.3f}")
        print(f"F1分数: {summary.f1_avg:.3f}")
        print(f"BLEU分数: {summary.bleu_avg:.3f}")
        print(f"ROUGE-L: {summary.rouge_l_avg:.3f}")
        print(f"平均置信度: {summary.average_confidence:.3f}")
        print(f"平均执行时间: {summary.average_execution_time:.2f}秒")

        print(f"\n推理方法分布:")
        for method, count in summary.reasoning_mode_distribution.items():
            print(f"  {method}: {count}")

        if summary.error_distribution:
            print(f"\n错误分布:")
            for error_type, count in summary.error_distribution.items():
                print(f"  {error_type}: {count}")

        print(f"\n结果文件已保存到 ./results/ 目录")

    except Exception as e:
        eval_logger.error(f"评估过程中发生错误: {str(e)}")
        print(f"评估失败: {e}")
        return

# Script entry point: run the interactive evaluation workflow.
if __name__ == "__main__":
    main()