import json
import os
import time
from datetime import datetime
from tqdm import tqdm

def load_data(file_path):
    """Load a dataset from disk.

    Files ending in ``.jsonl`` are parsed line by line (blank lines are
    skipped) and returned as a list of objects; anything else is parsed
    as a single JSON document.

    Args:
        file_path: Path to a UTF-8 encoded .jsonl or .json file.

    Returns:
        A list of records for .jsonl input, or whatever json.load yields
        for plain JSON input.
    """
    is_jsonl = file_path.endswith('.jsonl')
    with open(file_path, 'r', encoding='utf-8') as fh:
        if is_jsonl:
            return [json.loads(raw) for raw in fh if raw.strip()]
        return json.load(fh)

def trim_prediction(prediction, max_chars=200):
    """Shorten *prediction* to at most *max_chars* characters.

    Falsy input (None, "") yields "". When truncation occurs, trailing
    whitespace is stripped and a literal "..." is appended — so a
    truncated result can exceed max_chars by up to three characters.

    Args:
        prediction: Prediction text (works for Chinese text as well,
            since Python slices by code point).
        max_chars: Maximum number of characters to keep before the
            ellipsis suffix.

    Returns:
        The (possibly truncated) prediction string.
    """
    if not prediction:
        return ""
    if len(prediction) > max_chars:
        return prediction[:max_chars].rstrip() + "..."
    return prediction

def count_completed_predictions(predictions_file):
    """Count already completed predictions.

    Counts the non-blank lines in the JSONL predictions file so an
    interrupted run can resume from where it stopped.

    Args:
        predictions_file: Path to the predictions .jsonl file.

    Returns:
        Number of non-empty lines, or 0 when the file is missing or
        cannot be read/decoded.
    """
    if not os.path.exists(predictions_file):
        return 0

    try:
        with open(predictions_file, 'r', encoding='utf-8') as f:
            # Generator avoids loading the whole file into memory.
            return sum(1 for line in f if line.strip())
    except (OSError, UnicodeDecodeError):
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); an unreadable file simply
        # means "no resumable progress".
        return 0

def handle_multi_turn_query(item):
    """Extract the query text and ground-truth answer from a dataset item.

    Multi-turn items store "query" (and possibly "answer") as lists of
    turns; those are flattened into one string by joining with single
    spaces. Single-turn items return the query as-is and the answer as a
    stripped string.

    Args:
        item: Dataset record (dict) with optional "query" and "answer" keys.

    Returns:
        Tuple (current_query, ground_truth).
    """
    query = item.get("query")
    if isinstance(query, list):
        # Multi-turn: merge all turns into one query. str() guards against
        # non-string turns, matching how list answers are handled below
        # (the previous code raised TypeError on non-string query turns).
        current_query = " ".join(str(turn) for turn in query)
        answer = item.get("answer")
        if isinstance(answer, list):
            # Multi-turn answers may also be a list of per-turn answers.
            ground_truth = " ".join(str(ans) for ans in answer)
        else:
            ground_truth = str(item.get("answer", "")).strip()
    else:
        # Single-turn conversation.
        current_query = item.get("query", "")
        ground_truth = str(item.get("answer", "")).strip()

    return current_query, ground_truth


def generate_predictions_only(data_path, model, batch_size=10):
    """Generate Chinese RAG predictions for every item in a dataset.

    Loads the dataset, then for each item calls
    ``model.chinese_generate_answer`` and appends one JSON record per item
    to a timestamped ``.jsonl`` file. A sidecar progress JSON file is
    rewritten every ``batch_size`` items and once more at the end.

    Args:
        data_path: Path understood by load_data (.jsonl or .json).
        model: Object exposing chinese_generate_answer(query=...,
            search_results=..., query_time=..., conversation_history=...)
            and, optionally, cleanup_chinese_memory().
        batch_size: Number of items between progress saves / cleanups.

    Returns:
        Tuple (predictions_file, timestamp) identifying this session.

    NOTE(review): a second definition of this function appears later in
    this file and shadows this one for any code that runs after it.
    """
    print("开始生成预测结果...")
    
    # Per-session output files, named after a wall-clock timestamp.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    predictions_file = f"chinese_predictions_{timestamp}.jsonl"
    progress_file = f"chinese_progress_{timestamp}.json"
    
    # Load the dataset.
    print(f"从以下路径加载数据: {data_path}")
    data = load_data(data_path)
    total_items = len(data)
    print(f"总共需要处理的项目数: {total_items}")
    
    # Check for existing progress (resume support).
    # NOTE(review): predictions_file embeds the fresh timestamp generated
    # above, so this lookup can only find progress from a run started in the
    # same second — confirm whether resume is meant to work across restarts.
    completed = count_completed_predictions(predictions_file)
    if completed > 0:
        print(f"发现现有进度: {completed} 项已完成")
        print("从上次中断处继续...")
    
    # Only process items not already written out.
    remaining_data = data[completed:]
    
    with open(predictions_file, 'a', encoding='utf-8') as f:
        for i, item in enumerate(tqdm(remaining_data, desc="生成预测结果", initial=completed, total=total_items)):
            try:
                # Pull the fields used for generation and bookkeeping.
                current_query, ground_truth = handle_multi_turn_query(item)
                search_results = item.get("search_results", [])
                query_time = item.get("query_time", datetime.now().isoformat())
                domain = item.get("domain", "")
                question_type = item.get("question_type", "")
                
                current_index = completed + i
                
                print(f"\n[{current_index + 1}/{total_items}] 处理查询: {current_query[:50]}...")
                print(f"领域: {domain}, 问题类型: {question_type}")
                
                # Generate the prediction, guarding against model failures so
                # a single bad item cannot abort the whole run.
                start_time = time.time()
                try:
                    # Keyword arguments spell out the model's expected signature.
                    prediction = model.chinese_generate_answer(
                        query=current_query,
                        search_results=search_results,
                        query_time=query_time,
                        conversation_history=None  # or pull from item if available
                    )
                    if prediction is None:
                        prediction = "生成失败"
                except Exception as gen_error:
                    print(f"生成错误: {gen_error}")
                    prediction = "生成失败"
                    
                generation_time = time.time() - start_time
                
                # Normalize and trim the prediction text.
                prediction_clean = str(prediction).strip() if prediction else ""
                prediction_trimmed = trim_prediction(prediction_clean)
                
                # Build the per-item result record.
                result = {
                    "id": current_index,
                    "interaction_id": item.get("interaction_id", current_index),
                    "domain": domain,
                    "question_type": question_type,
                    "query": current_query,
                    "ground_truth": ground_truth,
                    "prediction": prediction_trimmed,
                    "raw_prediction": prediction_clean,
                    "generation_time": round(generation_time, 3),
                    "timestamp": datetime.now().isoformat(),
                    "metadata": {
                        "has_search_results": len(search_results) > 0,
                        "search_results_count": len(search_results),
                        "query_length": len(current_query),
                        "prediction_length": len(prediction_clean) if prediction_clean else 0,
                        "is_multi_turn": isinstance(item.get("query"), list),
                        "conversation_type": item.get("conversation_type", "single"),
                        "total_turns": item.get("total_turns", 1)
                    }
                }
                
                # Persist immediately so progress survives a crash.
                f.write(json.dumps(result, ensure_ascii=False) + '\n')
                f.flush()
                
                print(f"标准答案: {ground_truth[:100]}...")
                print(f"预测结果: {prediction_trimmed[:100]}...")
                print(f"生成时间: {generation_time:.3f}秒")
                
                # Periodically save progress and release model memory.
                if (i + 1) % batch_size == 0 or (i + 1) == len(remaining_data):
                    current_completed = completed + i + 1
                    progress = {
                        "session_id": timestamp,
                        "total_items": total_items,
                        "completed_items": current_completed,
                        "progress_percentage": (current_completed / total_items) * 100,
                        "predictions_file": predictions_file,
                        "last_update": datetime.now().isoformat(),
                        "status": "generating",
                        "avg_time_per_item": generation_time,  # NOTE(review): last item's time, not an average
                        "data_source": data_path
                    }
                    
                    with open(progress_file, 'w', encoding='utf-8') as pf:
                        json.dump(progress, pf, indent=2, ensure_ascii=False)
                    
                    print(f"进度已保存: {current_completed}/{total_items} ({progress['progress_percentage']:.1f}%) 已完成")
                    
                    # Optional model-provided memory cleanup hook.
                    if hasattr(model, 'cleanup_chinese_memory'):
                        try:
                            model.cleanup_chinese_memory()
                        except Exception as cleanup_error:
                            print(f"内存清理警告: {cleanup_error}")
                
            except Exception as e:
                print(f"处理第 {completed + i} 项时出错: {e}")
                # Record the failure so the line count (resume index) stays correct.
                error_result = {
                    "id": completed + i,
                    "interaction_id": item.get("interaction_id", completed + i),
                    "query": item.get("query", ""),
                    "ground_truth": str(item.get("answer", "")),
                    "prediction": "error",
                    "error": str(e),
                    "timestamp": datetime.now().isoformat(),
                    "generation_time": 0
                }
                f.write(json.dumps(error_result, ensure_ascii=False) + '\n')
                f.flush()
                continue
    
    # Final progress update marking the session complete.
    final_progress = {
        "session_id": timestamp,
        "total_items": total_items,
        "completed_items": total_items,
        "progress_percentage": 100.0,
        "predictions_file": predictions_file,
        "completion_time": datetime.now().isoformat(),
        "status": "completed",
        "data_source": data_path
    }
    
    with open(progress_file, 'w', encoding='utf-8') as pf:
        json.dump(final_progress, pf, indent=2, ensure_ascii=False)
    
    print(f"\n预测生成完成!")
    print(f"结果保存至: {predictions_file}")
    print(f"进度保存至: {progress_file}")
    
    return predictions_file, timestamp

def analyze_dataset(data_path):
    """Print and return summary statistics for a RAG dataset.

    Reports the total item count, how many items are multi-turn (their
    "query" is a list), and the distribution of "domain" and
    "question_type" values.

    Args:
        data_path: Path understood by load_data (.jsonl or .json).

    Returns:
        Dict with keys "total_items", "domains", "question_types" and
        "multi_turn_count".
    """
    print("正在分析数据集...")
    records = load_data(data_path)
    total_items = len(records)

    domains = {}
    question_types = {}
    multi_turn_count = 0

    for record in records:
        domain_key = record.get("domain", "unknown")
        type_key = record.get("question_type", "unknown")
        domains[domain_key] = 1 + domains.get(domain_key, 0)
        question_types[type_key] = 1 + question_types.get(type_key, 0)
        # bool is an int subclass, so this adds 1 for multi-turn items.
        multi_turn_count += isinstance(record.get("query"), list)

    print(f"\n=== 数据集分析结果 ===")
    print(f"总项目数: {total_items}")
    print(f"多轮对话数: {multi_turn_count}")
    print(f"单轮对话数: {total_items - multi_turn_count}")

    print(f"\n领域分布:")
    for domain, count in domains.items():
        print(f"  {domain}: {count} ({count/total_items*100:.1f}%)")

    print(f"\n问题类型分布:")
    for q_type, count in question_types.items():
        print(f"  {q_type}: {count} ({count/total_items*100:.1f}%)")

    return {
        "total_items": total_items,
        "domains": domains,
        "question_types": question_types,
        "multi_turn_count": multi_turn_count
    }

if __name__ == "__main__":
    # NOTE(review): a second `if __name__ == "__main__":` block appears near
    # the end of this file; when the file is run as a script BOTH execute,
    # this one first (using the definitions above) — confirm which session
    # is actually wanted.
    # Import the model class, falling back to an alternate module path.
    try:
        from models.user_config import ChineseRAGModel  # expected class name
    except ImportError:
        # Retry without the package prefix before giving up.
        try:
            from user_config import ChineseRAGModel
        except ImportError:
            print("错误: 无法导入ChineseRAGModel类")
            print("请检查模型类的导入路径")
            exit(1)
    
    # Configuration.
    DATASET_PATH = "../data/chinese_rag_dataset_1000.jsonl"
    BATCH_SIZE = 5  # smaller batches to reduce memory pressure
    
    # Analyze the dataset up front for a summary report.
    dataset_info = analyze_dataset(DATASET_PATH)
    
    print("\n初始化模型...")
    try:
        model = ChineseRAGModel()
    except Exception as model_init_error:
        print(f"模型初始化失败: {model_init_error}")
        exit(1)
    
    # Run the prediction session.
    try:
        predictions_file, session_id = generate_predictions_only(
            DATASET_PATH, 
            model, 
            batch_size=BATCH_SIZE
        )
        
        print(f"\n{'='*60}")
        print("中文RAG预测生成会话完成")
        print(f"{'='*60}")
        print(f"会话ID: {session_id}")
        print(f"预测文件: {predictions_file}")
        print(f"数据集信息: {dataset_info['total_items']} 项, {dataset_info['multi_turn_count']} 个多轮对话")
        print(f"下一步: 运行评估脚本")
        print(f"命令: python test_evaluation_only.py --file {predictions_file}")
        
    except Exception as e:
        print(f"测试过程中发生错误: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # Best-effort resource cleanup; failures here are intentionally ignored.
        try:
            if 'model' in locals():
                model.cleanup_chinese_memory()
        except:
            pass


def generate_predictions_only(data_path, model, batch_size=10):
    """Generate Chinese RAG predictions for every item in a dataset.

    Loads the dataset, then for each item calls
    ``model.chinese_generate_answer`` and appends one JSON record per item
    to a timestamped ``.jsonl`` file. A sidecar progress JSON file is
    rewritten every ``batch_size`` items and once more at the end.

    Args:
        data_path: Path understood by load_data (.jsonl or .json).
        model: Object exposing chinese_generate_answer(query=...,
            search_results=..., query_time=..., conversation_history=...)
            and, optionally, cleanup_chinese_memory().
        batch_size: Number of items between progress saves / cleanups.

    Returns:
        Tuple (predictions_file, timestamp) identifying this session.

    NOTE(review): this redefines (and shadows) the generate_predictions_only
    declared earlier in the file. It has been aligned with that version:
    the model call is guarded so one generation failure yields a fallback
    prediction instead of an error record, error records carry
    generation_time, and the optional memory-cleanup hook is invoked.
    Consider deleting one of the two definitions.
    """
    print("开始生成预测结果...")

    # Per-session output files, named after a wall-clock timestamp.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    predictions_file = f"chinese_predictions_{timestamp}.jsonl"
    progress_file = f"chinese_progress_{timestamp}.json"

    # Load the dataset.
    print(f"从以下路径加载数据: {data_path}")
    data = load_data(data_path)
    total_items = len(data)
    print(f"总共需要处理的项目数: {total_items}")

    # Check for existing progress (resume support).
    # NOTE(review): predictions_file embeds the fresh timestamp above, so
    # this can only resume a run started within the same second.
    completed = count_completed_predictions(predictions_file)
    if completed > 0:
        print(f"发现现有进度: {completed} 项已完成")
        print("从上次中断处继续...")

    # Only process items not already written out.
    remaining_data = data[completed:]

    with open(predictions_file, 'a', encoding='utf-8') as f:
        for i, item in enumerate(tqdm(remaining_data, desc="生成预测结果", initial=completed, total=total_items)):
            try:
                # Pull the fields used for generation and bookkeeping.
                current_query, ground_truth = handle_multi_turn_query(item)
                search_results = item.get("search_results", [])
                query_time = item.get("query_time", datetime.now().isoformat())
                domain = item.get("domain", "")
                question_type = item.get("question_type", "")

                current_index = completed + i

                print(f"\n[{current_index + 1}/{total_items}] 处理查询: {current_query[:50]}...")
                print(f"领域: {domain}, 问题类型: {question_type}")

                # Generate the prediction, guarding against model failures so
                # a single bad item cannot abort the whole run (consistent
                # with the earlier definition of this function).
                start_time = time.time()
                try:
                    prediction = model.chinese_generate_answer(
                        query=current_query,
                        search_results=search_results,
                        query_time=query_time,
                        conversation_history=None  # or pull from item if available
                    )
                    if prediction is None:
                        prediction = "生成失败"
                except Exception as gen_error:
                    print(f"生成错误: {gen_error}")
                    prediction = "生成失败"

                generation_time = time.time() - start_time

                # Normalize and trim the prediction text.
                prediction_clean = str(prediction).strip() if prediction else ""
                prediction_trimmed = trim_prediction(prediction_clean)

                # Build the per-item result record.
                result = {
                    "id": current_index,
                    "interaction_id": item.get("interaction_id", current_index),
                    "domain": domain,
                    "question_type": question_type,
                    "query": current_query,
                    "ground_truth": ground_truth,
                    "prediction": prediction_trimmed,
                    "raw_prediction": prediction_clean,
                    "generation_time": round(generation_time, 3),
                    "timestamp": datetime.now().isoformat(),
                    "metadata": {
                        "has_search_results": len(search_results) > 0,
                        "search_results_count": len(search_results),
                        "query_length": len(current_query),
                        "prediction_length": len(prediction_clean) if prediction_clean else 0,
                        "is_multi_turn": isinstance(item.get("query"), list),
                        "conversation_type": item.get("conversation_type", "single"),
                        "total_turns": item.get("total_turns", 1)
                    }
                }

                # Persist immediately so progress survives a crash.
                f.write(json.dumps(result, ensure_ascii=False) + '\n')
                f.flush()

                print(f"标准答案: {ground_truth[:100]}...")
                print(f"预测结果: {prediction_trimmed[:100]}...")
                print(f"生成时间: {generation_time:.3f}秒")

                # Periodically save progress and release model memory.
                if (i + 1) % batch_size == 0 or (i + 1) == len(remaining_data):
                    current_completed = completed + i + 1
                    progress = {
                        "session_id": timestamp,
                        "total_items": total_items,
                        "completed_items": current_completed,
                        "progress_percentage": (current_completed / total_items) * 100,
                        "predictions_file": predictions_file,
                        "last_update": datetime.now().isoformat(),
                        "status": "generating",
                        "avg_time_per_item": generation_time,  # NOTE(review): last item's time, not an average
                        "data_source": data_path
                    }

                    with open(progress_file, 'w', encoding='utf-8') as pf:
                        json.dump(progress, pf, indent=2, ensure_ascii=False)

                    print(f"进度已保存: {current_completed}/{total_items} ({progress['progress_percentage']:.1f}%) 已完成")

                    # Optional model-provided memory cleanup hook.
                    if hasattr(model, 'cleanup_chinese_memory'):
                        try:
                            model.cleanup_chinese_memory()
                        except Exception as cleanup_error:
                            print(f"内存清理警告: {cleanup_error}")

            except Exception as e:
                print(f"处理第 {completed + i} 项时出错: {e}")
                # Record the failure so the line count (resume index) stays
                # correct; generation_time added for schema consistency with
                # success records and the earlier definition.
                error_result = {
                    "id": completed + i,
                    "interaction_id": item.get("interaction_id", completed + i),
                    "query": item.get("query", ""),
                    "ground_truth": str(item.get("answer", "")),
                    "prediction": "error",
                    "error": str(e),
                    "timestamp": datetime.now().isoformat(),
                    "generation_time": 0
                }
                f.write(json.dumps(error_result, ensure_ascii=False) + '\n')
                f.flush()
                continue

    # Final progress update marking the session complete.
    final_progress = {
        "session_id": timestamp,
        "total_items": total_items,
        "completed_items": total_items,
        "progress_percentage": 100.0,
        "predictions_file": predictions_file,
        "completion_time": datetime.now().isoformat(),
        "status": "completed",
        "data_source": data_path
    }

    with open(progress_file, 'w', encoding='utf-8') as pf:
        json.dump(final_progress, pf, indent=2, ensure_ascii=False)

    print(f"\n预测生成完成!")
    print(f"结果保存至: {predictions_file}")
    print(f"进度保存至: {progress_file}")

    return predictions_file, timestamp

def analyze_dataset(data_path):
    """Summarize a RAG dataset: totals, turn counts, and label distributions.

    Prints the total number of items, the multi-turn vs single-turn split
    (an item is multi-turn when its "query" is a list), and the frequency
    of "domain" and "question_type" values.

    NOTE(review): this file defines analyze_dataset twice; this later
    definition shadows the earlier, behaviorally identical one.

    Args:
        data_path: Path understood by load_data (.jsonl or .json).

    Returns:
        Dict with keys "total_items", "domains", "question_types" and
        "multi_turn_count".
    """
    print("正在分析数据集...")
    data = load_data(data_path)
    total_items = len(data)

    def _tally(field):
        # Frequency table of data[*][field], "unknown" when missing.
        counts = {}
        for entry in data:
            label = entry.get(field, "unknown")
            counts[label] = counts.get(label, 0) + 1
        return counts

    domains = _tally("domain")
    question_types = _tally("question_type")
    multi_turn_count = sum(1 for entry in data if isinstance(entry.get("query"), list))

    print(f"\n=== 数据集分析结果 ===")
    print(f"总项目数: {total_items}")
    print(f"多轮对话数: {multi_turn_count}")
    print(f"单轮对话数: {total_items - multi_turn_count}")

    print(f"\n领域分布:")
    for domain, count in domains.items():
        print(f"  {domain}: {count} ({count/total_items*100:.1f}%)")

    print(f"\n问题类型分布:")
    for q_type, count in question_types.items():
        print(f"  {q_type}: {count} ({count/total_items*100:.1f}%)")

    return {
        "total_items": total_items,
        "domains": domains,
        "question_types": question_types,
        "multi_turn_count": multi_turn_count
    }

if __name__ == "__main__":
    # NOTE(review): this is the SECOND __main__ block in this file; when run
    # as a script it executes in addition to (after) the earlier one, so the
    # whole pipeline runs twice. It also imports ChineseUserModel while the
    # earlier block imports ChineseRAGModel — confirm which class is correct.
    # Unlike the earlier block, there is no import fallback, no try/except
    # around model init or generation, and no cleanup.
    from models.user_config import ChineseUserModel
    
    # Configuration.
    DATASET_PATH = "../data/chinese_rag_dataset_1000.jsonl"
    BATCH_SIZE = 10  # save progress every N items (Chinese generation can be slow)
    
    # Analyze the dataset up front for a summary report.
    dataset_info = analyze_dataset(DATASET_PATH)
    
    print("\n初始化模型...")
    model = ChineseUserModel()
    
    # Run the prediction session.
    predictions_file, session_id = generate_predictions_only(
        DATASET_PATH, 
        model, 
        batch_size=BATCH_SIZE
    )
    
    print(f"\n{'='*60}")
    print("中文RAG预测生成会话完成")
    print(f"{'='*60}")
    print(f"会话ID: {session_id}")
    print(f"预测文件: {predictions_file}")
    print(f"数据集信息: {dataset_info['total_items']} 项, {dataset_info['multi_turn_count']} 个多轮对话")
    print(f"下一步: 运行评估脚本")
    print(f"命令: python test_evaluation_only.py --file {predictions_file}")
