"""
数据收集模块 - 负责问题检索、知识块评分等数据准备工作
"""
import csv
import json
import os
import requests
from typing import List, Dict, Any, Optional, Tuple
import pandas as pd
import pickle
import time
from datetime import datetime

# 导入API客户端模块
from src.api_client import search_knowledge_chunks, evaluate_chunks_batch

def read_questions_from_csv(csv_path: str, question_column: int = 0) -> List[str]:
    """
    Read the list of questions from a CSV file.

    Args:
        csv_path: path to the CSV file.
        question_column: zero-based index of the column holding the
            question text (defaults to the first column).

    Returns:
        The questions in file order. The header row is skipped, and any
        row too short to contain the requested column is ignored.
    """
    with open(csv_path, 'r', encoding='utf-8') as f:
        rows = csv.reader(f)
        next(rows, None)  # discard the header row
        questions = [row[question_column] for row in rows if len(row) > question_column]

    print(f"从 {csv_path} 读取了 {len(questions)} 个问题")
    return questions

def retrieve_knowledge_chunks(question: str, top_k: int = 50) -> List[str]:
    """
    Retrieve candidate knowledge chunks for a question.

    Delegates to the retrieval API in ``src.api_client``.

    Args:
        question: the question text to search with.
        top_k: maximum number of chunks to request.

    Returns:
        The retrieved knowledge chunks.

    Raises:
        RuntimeError: if the API call yields no chunks at all.
    """
    print(f"为问题 '{question}' 检索 {top_k} 个知识块")

    # Actual retrieval is handled by the API client module.
    retrieved = search_knowledge_chunks(question, top_k)
    if not retrieved:
        raise RuntimeError(f"知识块检索失败：未能为问题 '{question}' 检索到任何知识块")
    return retrieved

def evaluate_chunks_with_llm(question: str, chunks: List[str]) -> List[Tuple[str, float, str]]:
    """
    Score knowledge chunks against a question using the LLM.

    Delegates to the batch scoring API in ``src.api_client``.

    Args:
        question: the question text.
        chunks: the knowledge chunks to score.

    Returns:
        One ``(chunk, score, reason)`` tuple per scored chunk.

    Raises:
        RuntimeError: if the scoring API returns no results.
    """
    print(f"使用大模型评估问题 '{question}' 的 {len(chunks)} 个知识块")

    # Batch scoring is handled by the API client module.
    scored = evaluate_chunks_batch(question, chunks)
    if not scored:
        raise RuntimeError(f"知识块评分失败：未能为问题 '{question}' 的知识块进行评分")
    return scored

def save_evaluation_results(results: Dict[str, List[Tuple[str, float, str]]], output_path: str):
    """
    Write evaluation results to a CSV file.

    The file is written as UTF-8 with BOM (``utf-8-sig``) so that Excel
    opens the Chinese headers correctly.

    Args:
        results: mapping of question -> list of (chunk, score, reason)
            tuples.
        output_path: destination CSV path.
    """
    # Flatten the nested mapping into one row per (question, chunk) pair.
    flat_rows = [
        [question, chunk, score, reason]
        for question, chunk_results in results.items()
        for chunk, score, reason in chunk_results
    ]

    with open(output_path, 'w', encoding='utf-8-sig', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['问题', '知识块', '评分', '理由'])
        writer.writerows(flat_rows)

    print(f"评估结果已保存到 {output_path}")

def load_progress(progress_file: str) -> Tuple[List[str], Dict[str, List[Tuple[str, float, str]]]]:
    """
    Restore a previous run's state from its checkpoint file.

    Args:
        progress_file: path of the pickle checkpoint written by
            ``save_progress``.

    Returns:
        ``(completed_questions, results)``; both empty when the file is
        missing or cannot be read.
    """
    if not os.path.exists(progress_file):
        return [], {}

    try:
        # NOTE: unpickling untrusted data can execute code; only load
        # checkpoints this pipeline wrote itself.
        with open(progress_file, 'rb') as f:
            snapshot = pickle.load(f)
            finished = snapshot.get('completed_questions', [])
            scores = snapshot.get('results', {})
            print(f"从进度文件恢复: 已完成 {len(finished)} 个问题")
            return finished, scores
    except Exception as e:
        # Best-effort recovery: a corrupt checkpoint just means a fresh start.
        print(f"加载进度文件失败: {str(e)}")
        return [], {}

def save_progress(progress_file: str, completed_questions: List[str], results: Dict[str, List[Tuple[str, float, str]]]):
    """
    Persist the current run state to a pickle checkpoint file.

    Failures are reported on stdout but never raised, so a broken
    checkpoint write cannot abort the surrounding pipeline.

    Args:
        progress_file: path of the checkpoint file to write.
        completed_questions: questions processed so far.
        results: accumulated evaluation results keyed by question.
    """
    snapshot = {
        'completed_questions': completed_questions,
        'results': results,
        'timestamp': datetime.now().isoformat(),
        'total_completed': len(completed_questions),
    }

    try:
        with open(progress_file, 'wb') as f:
            pickle.dump(snapshot, f)
        print(f"进度已保存: {len(completed_questions)} 个问题完成")
    except Exception as e:
        print(f"保存进度失败: {str(e)}")

def cleanup_progress_files(progress_file: str):
    """
    Delete the progress checkpoint file, if it exists.

    Deletion failures are logged rather than raised so cleanup can never
    break a successfully completed run.

    Args:
        progress_file: path of the checkpoint file to remove.
    """
    if not os.path.exists(progress_file):
        return
    try:
        os.remove(progress_file)
    except Exception as e:
        print(f"清理进度文件失败: {str(e)}")
    else:
        print("已清理进度文件")

def run_evaluation_pipeline(input_csv: str, output_csv: str, top_k: int = 50):
    """
    Run the full evaluation pipeline with checkpoint/resume support.

    For each question read from ``input_csv`` the pipeline retrieves
    ``top_k`` knowledge chunks, scores them with the LLM, and saves a
    checkpoint after every question so an interrupted run can be resumed
    by re-running with the same arguments. On success the combined
    results are written to ``output_csv`` and the checkpoint is removed.

    Args:
        input_csv: path to the CSV file containing the question list.
        output_csv: path of the CSV file to write evaluation results to.
        top_k: number of knowledge chunks to retrieve per question.

    Raises:
        Exception: any error from retrieval/scoring is re-raised after
            progress has been saved, so the run can be resumed.
    """
    # The checkpoint file lives next to the output file.
    progress_file = f"{output_csv}.progress"
    
    print("="*50)
    print(f"开始评估流程 - 输入: {input_csv}, 输出: {output_csv}")
    print(f"进度文件: {progress_file}")
    print("="*50)
    
    # 1. Read the question list.
    all_questions = read_questions_from_csv(input_csv)
    print(f"总共需要处理 {len(all_questions)} 个问题")
    
    # 2. Load any previous progress (if a checkpoint exists).
    completed_questions, all_results = load_progress(progress_file)
    
    # 3. Determine which questions still need processing.
    #    Membership tests use a set for O(1) lookups instead of scanning
    #    the completed list once per question (O(n^2) overall).
    completed_set = set(completed_questions)
    remaining_questions = [q for q in all_questions if q not in completed_set]
    
    if completed_questions:
        print(f"检测到之前的进度: 已完成 {len(completed_questions)} 个问题")
        print(f"还需要处理 {len(remaining_questions)} 个问题")
        
        # Show the completed questions (at most the first 5).
        if len(completed_questions) <= 5:
            print(f"已完成的问题: {completed_questions}")
        else:
            print(f"已完成的问题（前5个）: {completed_questions[:5]} ... （共{len(completed_questions)}个）")
    else:
        print("开始新的评估流程")
    
    # 4. Process the remaining questions.
    if remaining_questions:
        print(f"\n开始处理剩余的 {len(remaining_questions)} 个问题...")
        
        # Number of questions already finished before this run,
        # used only for the 1-based display index below.
        initial_completed_count = len(completed_questions)
        
        for i, question in enumerate(remaining_questions):
            try:
                # Position of this question within the full question list.
                current_question_index = initial_completed_count + i + 1
                print(f"\n--- 处理问题 {current_question_index}/{len(all_questions)} ---")
                print(f"问题: {question}")
                start_time = time.time()
                
                # 4.1 Retrieve knowledge chunks.
                print("步骤1: 检索知识块...")
                chunks = retrieve_knowledge_chunks(question, top_k)
                print(f"成功检索到 {len(chunks)} 个知识块")
                
                # 4.2 Score the chunks with the LLM.
                print("步骤2: LLM评分...")
                chunk_results = evaluate_chunks_with_llm(question, chunks)
                print(f"成功评估 {len(chunk_results)} 个知识块")
                
                # 4.3 Record the results.
                all_results[question] = chunk_results
                completed_questions.append(question)
                
                # 4.4 Checkpoint after every question so a crash loses at most one.
                save_progress(progress_file, completed_questions, all_results)
                
                elapsed_time = time.time() - start_time
                print(f"问题处理完成，耗时: {elapsed_time:.2f}秒")
                print(f"进度: {len(completed_questions)}/{len(all_questions)} ({len(completed_questions)/len(all_questions)*100:.1f}%)")
                
                # Brief pause between questions to avoid API rate limiting.
                if i < len(remaining_questions) - 1:  # not the last question
                    time.sleep(1)
                    
            except Exception as e:
                print(f"处理问题时发生错误: {str(e)}")
                print(f"当前进度已保存，可以重新运行脚本继续执行")
                # Bare re-raise preserves the original traceback
                # (``raise e`` would re-anchor it at this line).
                raise
    
    # 5. Write the final combined results.
    print(f"\n保存最终评估结果到 {output_csv}...")
    save_evaluation_results(all_results, output_csv)
    
    # 6. Remove the checkpoint now that the run completed successfully.
    cleanup_progress_files(progress_file)
    
    print("="*50)
    print(f"✅ 评估流程完成！")
    print(f"📊 共处理 {len(all_questions)} 个问题")
    print(f"📁 结果已保存到 {output_csv}")
    print("="*50)

if __name__ == "__main__":
    # Example invocation: read questions from the raw data folder and
    # write scored evaluation results to the processed folder.
    run_evaluation_pipeline(
        input_csv="data/raw/questions.csv",
        output_csv="data/processed/evaluation_results.csv",
    )