#!/usr/bin/env python3
"""
多模态推理生成器主程序

该程序基于配置文件执行多模态MCTS推理，支持图像和文本的联合推理。
与原始GSM8K生成器保持完全一致的输出目录结构和工作流程。

使用方法:
    python run_src/do_multimodal_generate.py --config configs/multimodal_config.yaml
"""

import sys
import os
import argparse
import logging
import time
import json
from pathlib import Path
from typing import List, Dict, Any

# Make the project root importable so the `common`/`run_src`/`eval_src`
# packages resolve when this script is run directly from the repo
# (e.g. `python run_src/do_multimodal_generate.py`).
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from common.config_loader import load_config, config_to_args
from common.multimodal_data_loader import MultimodalDataLoader
from common.utils import fix_seeds
from run_src.multimodal_mcts import multimodal_search_for_answers
from eval_src.Evaluator import GSM8KEvaluator
import jsonlines

# Silence chatty third-party libraries (OpenAI client + its HTTP stack)
# so their DEBUG/INFO noise does not flood our logs.
logging.getLogger("openai").setLevel(logging.WARNING)
logging.getLogger("httpx").setLevel(logging.WARNING)
logging.getLogger("httpcore").setLevel(logging.WARNING)
logging.getLogger("openai._base_client").setLevel(logging.WARNING)

# Root logger configuration for this script (level may be overridden in main()).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


def process_single_sample(config, sample: Dict[str, Any], sample_id: int) -> Dict[str, Any]:
    """
    Run multimodal MCTS search on a single sample.

    Args:
        config: Loaded configuration object (passed through to the searcher).
        sample (Dict[str, Any]): Sample dict; reads 'question', 'image_path'
            and 'answer' (missing keys fall back to '' / None).
        sample_id (int): Sequential ID used for logging and bookkeeping.

    Returns:
        Dict[str, Any]: The search result augmented with sample metadata on
        success, or an error record (with 'error' / 'error_details' keys and
        'best_answer' set to None) if the search raised.
    """
    question = sample.get('question', '')
    image_path = sample.get('image_path')
    gold_answer = sample.get('answer', '')

    logger.info(f"处理样本 {sample_id}: {question[:50]}{'...' if len(question) > 50 else ''}")

    try:
        # Run the multimodal MCTS search (project-level entry point).
        result = multimodal_search_for_answers(
            config=config,
            question=question,
            image_path=image_path,
            question_id=sample_id,
            gt_answer=gold_answer
        )

        # Attach sample bookkeeping to the returned result.
        result.update({
            'sample_id': sample_id,
            'gold_answer': gold_answer,
            'original_data': sample
        })
        # Fix: the error path always records 'has_image', but the success path
        # relied on the searcher providing it; save_statistics() counts image
        # samples via this key, so guarantee it here without overwriting a
        # value the searcher may already have set.
        result.setdefault('has_image', image_path is not None)

        logger.info(f"样本 {sample_id} 处理完成")
        return result

    except Exception as e:
        import traceback
        error_details = traceback.format_exc()
        logger.error(f"样本 {sample_id} 处理失败: {str(e)}")
        logger.error(f"详细错误信息: {error_details}")

        # Structured failure record so downstream aggregation never KeyErrors.
        return {
            'sample_id': sample_id,
            'question': question,
            'image_path': image_path,
            'gold_answer': gold_answer,
            'error': str(e),
            'error_details': error_details,
            'best_answer': None,
            'has_image': image_path is not None,
            'original_data': sample
        }


def save_statistics(results: List[Dict[str, Any]], output_path: str):
    """
    Aggregate per-sample results, write a JSON statistics file, and print a
    human-readable summary to stdout.

    Args:
        results (List[Dict[str, Any]]): Per-sample result dicts. Entries
            containing an 'error' key count as failures; optional
            'model_stats' sub-dicts contribute API-usage totals.
        output_path (str): Destination path for the JSON statistics dump.
    """
    total_samples = len(results)
    failed_samples = sum(1 for entry in results if 'error' in entry)
    successful_samples = total_samples - failed_samples
    image_samples = sum(1 for entry in results if entry.get('has_image', False))
    text_only_samples = total_samples - image_samples

    # Collect the per-sample model stats once, then reduce each field.
    stats_entries = [entry['model_stats'] for entry in results if 'model_stats' in entry]
    total_calls = sum(s.get('call_count', 0) for s in stats_entries)
    total_time = sum(s.get('total_time', 0) for s in stats_entries)
    total_tokens = sum(s.get('total_tokens', 0) for s in stats_entries)

    # max(..., 1) guards every average against division by zero.
    statistics = {
        'summary': {
            'total_samples': total_samples,
            'successful_samples': successful_samples,
            'failed_samples': failed_samples,
            'success_rate': successful_samples / total_samples if total_samples > 0 else 0,
            'image_samples': image_samples,
            'text_only_samples': text_only_samples
        },
        'model_stats': {
            'total_api_calls': total_calls,
            'total_time_seconds': total_time,
            'total_tokens': total_tokens,
            'avg_time_per_call': total_time / max(total_calls, 1),
            'avg_calls_per_sample': total_calls / max(successful_samples, 1)
        },
        'timestamp': time.strftime("%Y-%m-%d %H:%M:%S")
    }

    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(statistics, f, ensure_ascii=False, indent=2)

    # Console summary mirroring the JSON content.
    print(f"\n=== 多模态推理统计摘要 ===")
    print(f"总样本数: {total_samples}")
    print(f"成功处理: {successful_samples}")
    print(f"成功率: {statistics['summary']['success_rate']:.2%}")
    print(f"图像样本: {image_samples}")
    print(f"纯文本样本: {text_only_samples}")
    print(f"API调用次数: {total_calls}")
    print(f"总用时: {total_time:.2f}秒")
    print(f"平均每题用时: {total_time/max(successful_samples, 1):.2f}秒")
    print(f"平均每题API调用: {total_calls/max(successful_samples, 1):.1f}次")

def main():
    """
    Entry point: parse CLI args, load the YAML config, select a dataset
    slice, run multimodal MCTS on each sample, and write per-run statistics
    to the configured output directory.
    """
    parser = argparse.ArgumentParser(description='多模态推理生成器')
    parser.add_argument(
        '--config',
        type=str,
        required=True,
        help='配置文件路径'
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        help='详细输出模式'
    )

    args = parser.parse_args()

    print("多模态推理生成器启动")
    print(f"配置文件: {args.config}")

    # Load configuration from file.
    logger.info(f"加载配置文件: {args.config}")
    config = load_config(args.config)

    # Command-line flags override file configuration.
    if args.verbose:
        config.experiment.verbose = True

    # Configure log levels.
    if config.experiment.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
        # Even in verbose mode, keep third-party HTTP/client libs at WARNING.
        logging.getLogger("openai").setLevel(logging.WARNING)
        logging.getLogger("httpx").setLevel(logging.WARNING)
        logging.getLogger("httpcore").setLevel(logging.WARNING)
        logging.getLogger("openai._base_client").setLevel(logging.WARNING)
    else:
        logging.getLogger().setLevel(getattr(logging, config.output.log_level))

    # Seed all RNGs for reproducibility.
    fix_seeds(config.experiment.seed)

    # Convert config to the legacy args namespace (this also creates the
    # output directories).
    args_obj = config_to_args(config)

    # Snapshot the effective configuration into the output directory.
    # Fix: the snapshot is serialized with yaml.dump, so name the file
    # config.yaml — it was previously (misleadingly) saved as config.json.
    import yaml
    config_path = os.path.join(args_obj.run_outputs_dir, 'config.yaml')
    with open(config_path, 'w', encoding='utf-8') as f:
        yaml.dump(vars(config), f, default_flow_style=False, allow_unicode=True)

    logger.info(f"输出目录: {args_obj.run_outputs_dir}")
    logger.info(f"答案保存目录: {args_obj.answer_sheets_dir}")

    # Load the dataset.
    logger.info("开始加载数据集...")
    data_loader = MultimodalDataLoader(config)
    dataset = data_loader.load_dataset()

    logger.info(f"数据集加载完成，共 {len(dataset)} 个样本")
    image_count = sum(1 for item in dataset if item.get('has_image', False))
    logger.info(f"图像样本: {image_count}, 纯文本样本: {len(dataset) - image_count}")

    # Select the [start_idx, end_idx) slice, capped at max_samples.
    start_idx = config.experiment.start_idx
    end_idx = config.experiment.end_idx
    original_dataset_size = len(dataset)
    max_samples = config.data.max_samples

    if start_idx > 0 or end_idx != -1:
        # end_idx == -1 means "to the end of the dataset".
        if end_idx == -1:
            end_idx = original_dataset_size

        # Clamp both bounds into the valid range; end_idx never precedes start_idx.
        start_idx = max(0, min(start_idx, original_dataset_size))
        end_idx = max(start_idx, min(end_idx, original_dataset_size))

        available_range = list(range(start_idx, end_idx))
        logger.info(f"数据范围: [{start_idx}, {end_idx}) (共{len(available_range)}个可用索引)")

        if len(available_range) == 0:
            logger.warning(f"指定范围内没有可用数据，将使用整个数据集")
            selected_samples = dataset[:max_samples]
        else:
            # Take the first max_samples indices from the requested range.
            # After clamping, every index is already < original_dataset_size,
            # so the old re-filtering step was a no-op and has been removed.
            num_to_select = min(max_samples, len(available_range))
            valid_indices = available_range[:num_to_select]
            selected_samples = [dataset[i] for i in valid_indices]
            logger.info(f"从范围 [{start_idx}, {end_idx}) 中选择了索引: {valid_indices}")
            logger.info(f"实际选择的数据ID: {[dataset[i].get('id', f'sample_{i}') for i in valid_indices]}")

        dataset = selected_samples
    else:
        # No explicit range: take the first max_samples samples.
        dataset = dataset[:max_samples]
        logger.info(f"使用前 {len(dataset)} 个样本")

    logger.info(f"实际处理样本数: {len(dataset)}")

    # Process every selected sample sequentially.
    results = []
    start_time = time.time()

    for i, sample in enumerate(dataset):
        result = process_single_sample(config, sample, i)
        results.append(result)

        # Checkpoint progress counters every 5 samples.
        if config.experiment.save_intermediate and (i + 1) % 5 == 0:
            intermediate_path = os.path.join(args_obj.run_outputs_dir, f'intermediate_{i+1}.txt')
            with open(intermediate_path, 'w', encoding='utf-8') as f:
                f.write(f"已处理样本: {i+1}/{len(dataset)}\n")
                f.write(f"成功样本: {sum(1 for r in results if 'error' not in r)}\n")
                f.write(f"失败样本: {sum(1 for r in results if 'error' in r)}\n")

    total_time = time.time() - start_time
    logger.info(f"所有样本处理完成，总用时: {total_time:.2f}秒")

    # Write final aggregate statistics (JSON content; the .txt name is kept
    # for compatibility with the original output layout).
    stats_path = os.path.join(args_obj.run_outputs_dir, 'final_result.txt')
    save_statistics(results, stats_path)

    logger.info("多模态推理生成完成")
    print(f"\n📁 结果目录: {args_obj.run_outputs_dir}")
    print(f"📊 统计信息: {stats_path}")

# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()