#!/usr/bin/env python3
"""
独立运行脚本，避免相对导入问题
使用方法：
python ragen/run_agent.py
或
python -m ragen.run_agent
"""

import sys
import os
from pathlib import Path
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import List, Dict, Any

# Ensure the project root is importable when this file is run directly as a
# script (e.g. `python ragen/run_agent.py`), not only via `python -m`.
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

import hydra
from transformers import AutoTokenizer
from verl import DataProto
from ragen.llm_agent.agent_proxy import ApiCallingWrapperWg, LLMAgentProxy
from ragen.llm_agent.data_collector import TrainingDataCollector
from datetime import datetime


def create_model_proxy(config, model_name: str, tokenizer):
    """Build an ``LLMAgentProxy`` bound to ``model_name``.

    Works on a deep copy of ``config`` so the shared config object is never
    mutated — important because one proxy is built per model and they later
    run on separate threads.
    """
    from copy import deepcopy
    from omegaconf import OmegaConf

    per_model_cfg = deepcopy(config)
    # Struct mode is lifted briefly so a ``model_name`` key can be injected
    # into configs that only declare ``model_names``.
    OmegaConf.set_struct(per_model_cfg, False)
    per_model_cfg.model_config.model_name = model_name
    OmegaConf.set_struct(per_model_cfg, True)

    wrapper = ApiCallingWrapperWg(per_model_cfg, tokenizer)
    return LLMAgentProxy(per_model_cfg, wrapper, tokenizer)


def run_model_rollout(proxy: LLMAgentProxy, model_name: str, config) -> Dict[str, Any]:
    """Run one rollout for a single model and collect its metrics.

    Args:
        proxy: ready-to-use agent proxy for this model.
        model_name: config key of the model (used for logging and results).
        config: run configuration; ``config.generate_type`` selects train vs.
            val mode (defaults to ``'train'``).

    Returns:
        Dict with keys ``model_name``, ``success`` (always ``True`` here),
        ``rollouts``, ``duration``, ``avg_reward``, ``metrics`` and
        ``rm_scores``.

    Raises:
        Exception: whatever the rollout raised is logged and re-raised; the
            caller is responsible for recording the failure.
    """
    print(f"🚀 [{model_name}] 开始运行rollout...")
    start_time = time.time()

    try:
        # ``generate_type == 'val'`` switches the proxy into validation mode.
        generate_type = getattr(config, 'generate_type', 'train')
        is_val_mode = (generate_type == 'val')
        print(f"🎯 [{model_name}] 运行模式: {generate_type.upper()} (val={is_val_mode})")

        rollouts = proxy.rollout(
            DataProto(
                batch=None,
                non_tensor_batch=None,
                meta_info={
                    # NOTE(review): hard-coded token ids — presumably for the
                    # Qwen2 tokenizer family; confirm they match the tokenizer
                    # configured for every model in the run.
                    'eos_token_id': 151645,
                    'pad_token_id': 151643,
                    'recompute_log_prob': False,
                    'do_sample': False,
                    'validate': True
                }
            ),
            val=is_val_mode
        )

        duration = time.time() - start_time

        # Mean total episode reward across the batch.
        rm_scores = rollouts.batch["rm_scores"]
        metrics = rollouts.meta_info["metrics"]
        avg_reward = rm_scores.sum(-1).mean().item()

        print(f"✅ [{model_name}] Rollout完成，耗时: {duration:.2f} 秒，平均奖励: {avg_reward:.4f}")

        return {
            'model_name': model_name,
            'success': True,
            'rollouts': rollouts,
            'duration': duration,
            'avg_reward': avg_reward,
            'metrics': metrics,
            'rm_scores': rm_scores
        }

    except Exception as e:
        print(f"❌ [{model_name}] Rollout失败: {e}")
        # Bare ``raise`` keeps the original traceback; the old code computed a
        # duration here that was never used, then re-raised with ``raise e``.
        raise


def safe_extract_config(obj, default=None):
    """Best-effort conversion of a config value into plain, serializable data.

    Args:
        obj: config value — an OmegaConf node, a dict-like mapping, a plain
            scalar/list/dict, or ``None``.
        default: value returned when ``obj`` is ``None``; itself defaults
            to ``{}``.

    Returns:
        A JSON-friendly representation; falls back to ``str(obj)`` when the
        object cannot be converted.
    """
    if obj is None:
        # Compare with ``is None`` (not truthiness): the old ``default or {}``
        # silently replaced falsy defaults like [], 0 or "" with {}.
        return {} if default is None else default
    try:
        if hasattr(obj, '_content'):  # OmegaConf nodes (private attr — TODO: prefer OmegaConf.to_container)
            return dict(obj._content)
        elif hasattr(obj, 'items'):  # dict-like objects
            return {str(k): v for k, v in obj.items()}
        else:
            return obj if isinstance(obj, (str, int, float, bool, list, dict)) else str(obj)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return str(obj)


def save_model_result(result: Dict[str, Any], data_collector: TrainingDataCollector, config, timestamp_str: str):
    """Persist one model's rollout output through the data collector.

    Collects the rollout messages, attaches model/run metadata and writes
    everything into a timestamped, per-model subdirectory. Any failure is
    reported but swallowed, so one model's save error cannot affect the
    other models in the run.
    """
    try:
        name_in_config = result['model_name']
        rollout_batch = result['rollouts']
        scores = result['rm_scores']
        run_metrics = result['metrics']

        # Resolve the actual (provider-facing) model name from the config.
        info = config.model_info[name_in_config]
        resolved_name = info.get('model_name', name_in_config)

        # Each model is collected in isolation: wipe whatever a previously
        # saved model left behind before ingesting this one's messages.
        messages = rollout_batch.non_tensor_batch.get("messages_list", [])
        data_collector.collected_data.clear()
        data_collector.collect_from_messages_list(messages)

        model_metadata = {
            "config_model_name": str(name_in_config),
            "actual_model_name": str(resolved_name),
            "model_provider": str(info.get('provider_name', 'unknown')),
            "base_url": str(info.get('base_url', 'unknown')),
            "generation_kwargs": safe_extract_config(info.get('generation_kwargs'), {}),
            "extra_body": safe_extract_config(info.get('extra_body'), {}),
        }

        if hasattr(scores, 'shape'):
            sample_count = int(scores.shape[0])
        else:
            sample_count = len(messages)

        # One sub-directory per model, e.g. "<timestamp>_org_model".
        target_dir = f"{timestamp_str}_{resolved_name.replace('/', '_')}"

        data_collector.save_training_data(
            metadata={
                "sample_count": sample_count,
                "metrics": run_metrics,
                "model_info": model_metadata,
                "avg_reward": result['avg_reward'],
                "duration": result['duration'],
            },
            date_time_dir=target_dir,
            model_name=resolved_name,
        )
        print(f"✅ [{name_in_config}] 训练数据已保存")

    except Exception as e:
        print(f"⚠️  [{result.get('model_name', '未知模型')}] 保存训练数据失败: {e}")


@hydra.main(version_base=None, config_path="../config", config_name="evaluate_api_llm")
def main(config):
    """Entry point: run rollouts for several API models concurrently.

    Resolves the model list from the Hydra config, builds one proxy per
    valid model, fans the rollouts out over a thread pool (one worker per
    model), saves each successful model's training data immediately, and
    prints a summary comparing all models at the end. A single model's
    failure no longer aborts the others.
    """

    print("=== 多模型并发思考模式启动 ===")

    # Resolve the model list, accepting both the new layout (model_names,
    # possibly a single string) and the legacy one (model_name).
    try:
        model_names = config.model_config.model_names
        if isinstance(model_names, str):
            model_names = [model_names]  # single-model configs
    except AttributeError:
        if hasattr(config.model_config, 'model_name'):
            model_names = [config.model_config.model_name]
        else:
            print("❌ 配置错误：未找到 model_names 或 model_name")
            return

    print(f"🎯 将同时运行 {len(model_names)} 个模型: {', '.join(model_names)}")

    # Announce train vs. val mode (also re-derived per model in the worker).
    generate_type = getattr(config, 'generate_type', 'train')
    print(f"📋 运行模式: {generate_type.upper()} ({'验证模式' if generate_type == 'val' else '训练模式'})")

    # The collector owns all save logic; this script only drives it.
    data_collector = TrainingDataCollector(config)

    # Report whether data saving is configured.
    data_config = getattr(config, 'data_saving', None)
    if data_config and data_config.get('enabled', False):
        print(f"✅ 数据保存已启用")
        print(f"📁 保存路径: {data_config.get('save_path', 'training_data')}")
        print(f"📊 训练集占比: {data_config.get('train_ratio', 0.8)}")
    else:
        print("ℹ️  数据保存未启用（可在配置 data_saving.enabled=true 开启）")

    # Validate and echo each model's configuration; unknown models are
    # skipped rather than failing the whole run.
    print(f"\n🤖 模型配置信息:")
    valid_models = []
    for model_name in model_names:
        if model_name not in config.model_info:
            print(f"⚠️  模型 '{model_name}' 未在 model_info 中找到配置，跳过")
            continue

        model_info = config.model_info[model_name]
        print(f"\n  📌 {model_name}:")
        print(f"    🔗 API地址: {model_info.get('base_url', 'Default')}")
        print(f"    🏷️  实际模型: {model_info.get('model_name', model_name)}")

        # Thinking-mode settings live under extra_body.
        if hasattr(model_info, 'extra_body') and model_info.extra_body:
            extra_body = model_info.extra_body
            if extra_body.get('enable_thinking', False):
                print(f"    🧠 思考模式: 启用")
                print(f"    💭 思考预算: {extra_body.get('thinking_budget', '未设置')} tokens")
            else:
                print("    ⚠️  思考模式: 未启用")
        else:
            print("    ⚠️  未找到extra_body配置")

        # A key still containing "${" is an unresolved interpolation and must
        # be supplied via environment variables instead.
        api_key = model_info.get('api_key', None)
        if api_key and not api_key.startswith("${"):
            print("    🔑 API密钥: 已配置")
        else:
            print("    ⚠️  API密钥: 需要环境变量")

        valid_models.append(model_name)

    if not valid_models:
        print("❌ 没有有效的模型配置，程序退出")
        return

    # One tokenizer is shared by every proxy.
    print(f"\n🔧 初始化模型组件...")
    try:
        tokenizer = AutoTokenizer.from_pretrained(config.actor_rollout_ref.model.path)
        print(f"✅ Tokenizer初始化完成")

        stats = data_collector.get_stats()
        print(f"📊 数据收集器状态: {stats}")

    except Exception as e:
        print(f"❌ 初始化失败: {e}")
        print(f"💡 建议：")
        print(f"  1. 检查API密钥是否正确设置")
        print(f"  2. 检查网络连接")
        print(f"  3. 运行配置测试: python ragen/test_config.py")
        import traceback
        traceback.print_exc()
        return

    # Build one proxy per model; a single failure must not block the rest.
    print(f"\n🏗️  创建 {len(valid_models)} 个模型代理...")
    proxies = {}
    for model_name in valid_models:
        try:
            proxy = create_model_proxy(config, model_name, tokenizer)
            proxies[model_name] = proxy
            print(f"✅ [{model_name}] 代理创建完成")
        except Exception as e:
            print(f"❌ [{model_name}] 代理创建失败: {e}")

    if not proxies:
        print("❌ 没有成功创建的模型代理，程序退出")
        return

    print(f"📈 成功创建 {len(proxies)} 个模型代理")

    # Fan the rollouts out over a thread pool, one worker per model.
    print(f"\n🚀 开始并发运行 {len(proxies)} 个模型...")
    total_start_time = time.time()
    timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")

    results = []
    with ThreadPoolExecutor(max_workers=len(proxies)) as executor:
        future_to_model = {
            executor.submit(run_model_rollout, proxy, model_name, config): model_name
            for model_name, proxy in proxies.items()
        }

        for future in as_completed(future_to_model):
            model_name = future_to_model[future]
            try:
                result = future.result()
                results.append(result)

                # Persist each model's data as soon as it succeeds.
                if result.get('success', False) and data_collector.is_enabled():
                    print(f"\n💾 [{model_name}] 运行成功，立即保存训练数据...")
                    save_model_result(result, data_collector, config, timestamp_str)

            except Exception as e:
                # BUGFIX: record the failure and keep going. The old code
                # re-raised here, which aborted every other model's rollout
                # and made the failure summary below unreachable.
                print(f"❌ [{model_name}] 执行异常: {e}")
                results.append({
                    'model_name': model_name,
                    'success': False,
                    'error': str(e),
                    'duration': 0
                })

    total_end_time = time.time()
    total_duration = total_end_time - total_start_time

    print(f"\n⏱️  所有模型运行完成，总耗时: {total_duration:.2f} 秒")

    # Summarize successes and failures.
    successful_results = [r for r in results if r.get('success', False)]
    failed_results = [r for r in results if not r.get('success', False)]

    print(f"\n📊 总体统计:")
    print(f"成功: {len(successful_results)}/{len(results)} 个模型")
    if failed_results:
        print(f"失败: {len(failed_results)} 个模型 - {[r['model_name'] for r in failed_results]}")

    if successful_results:
        print(f"\n📈 模型性能对比:")
        print(f"{'模型名称':<20} {'平均奖励':<12} {'耗时(秒)':<10} {'状态':<8}")
        print("-" * 55)

        # Best-performing model first.
        successful_results.sort(key=lambda x: x.get('avg_reward', 0), reverse=True)

        for result in successful_results:
            model_name = result['model_name']
            avg_reward = result.get('avg_reward', 0)
            duration = result.get('duration', 0)
            print(f"{model_name:<20} {avg_reward:<12.4f} {duration:<10.2f} {'✅':<8}")

        final_stats = data_collector.get_stats()
        print(f"📊 数据收集统计: {final_stats}")

        # Detailed metrics for the top model.
        best_result = successful_results[0]
        print(f"\n🏆 最佳模型 [{best_result['model_name']}] 详细指标:")
        for k, v in best_result['metrics'].items():
            print(f"  {k}: {v}")

        print(f"\n✅ 多模型并发运行完成！{len(successful_results)} 个模型的思考数据已保存（若启用）。")

    else:
        print(f"\n❌ 所有模型运行均失败")
        for result in failed_results:
            print(f"  [{result['model_name']}]: {result.get('error', '未知错误')}")

if __name__ == "__main__":
    # Hydra parses CLI overrides and injects the resolved config into main().
    main()