#!/usr/bin/env python3
"""
基于 MapReduce 的长文本摘要提取，使用统一配置管理
处理长文本，生成指定长度的摘要
"""

import sys
import os
import math
from concurrent.futures import ThreadPoolExecutor, as_completed
import time
from dataclasses import dataclass
from typing import List, Dict, Optional
import asyncio

# 添加项目根目录到系统路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from llm import quick_chat
from .config import LLM_CONFIG, USE_SILICONFLOW

# Configuration constants
CHUNK_SIZE = 10000  # chunk size in characters, sized to fit the model context
MAX_WORKERS = 8  # thread-pool size for concurrent map-phase API calls

# Dataclass for storing a summary result and its timing statistics (internal use).
@dataclass
class SummaryResult:
    """A generated summary together with per-stage timing statistics."""

    summary_text: str  # the generated summary text
    original_length: int  # character count of the original input
    summary_length: int  # character count of the summary
    compression_ratio: float  # presumably summary_length / original_length — unused here, TODO confirm
    timing: Dict[str, float]  # elapsed seconds per processing stage

def split_text(text, chunk_size=CHUNK_SIZE):
    """Split *text* into consecutive fixed-size chunks.

    Args:
        text: the string to split.
        chunk_size: maximum characters per chunk.

    Returns:
        (chunks, elapsed): list of substrings of at most ``chunk_size``
        characters, and the seconds spent splitting.
    """
    started = time.time()
    pieces = [text[offset:offset + chunk_size]
              for offset in range(0, len(text), chunk_size)]
    return pieces, time.time() - started

def generate_summary(text, prompt_template, max_tokens=300, temperature=0.3):
    """Generate a summary of *text* using the globally configured LLM.

    Args:
        text: input text to summarize.
        prompt_template: template containing a ``{text}`` placeholder.
        max_tokens: cap on generated tokens.
        temperature: sampling temperature.

    Returns:
        (summary, elapsed): the summary string (empty on failure) and the
        seconds spent in the API call.
    """
    prompt = prompt_template.format(text=text)
    start_time = time.time()

    try:
        # Select endpoint/model from the global configuration once; only
        # the SiliconFlow endpoint requires an API key, so it is added to
        # the kwargs conditionally instead of duplicating the whole call.
        kwargs = {
            "system_prompt": "You are a helpful assistant",
            "user_prompt": prompt,
            "max_tokens": max_tokens,
            "temperature": temperature,
        }
        if USE_SILICONFLOW:
            kwargs.update(
                base_url=LLM_CONFIG["BASE_URL_dev"],
                model=LLM_CONFIG["MODEL_dev"],
                api_key=LLM_CONFIG["API_KEY"],
            )
        else:
            kwargs.update(
                base_url=LLM_CONFIG["BASE_URL_1238"],
                model=LLM_CONFIG["MODEL_14B"],
            )

        result = quick_chat(**kwargs)
        return result.text.strip(), time.time() - start_time
    except Exception as e:
        # Best-effort: report the failure and return an empty summary so the
        # MapReduce pipeline can continue with the remaining chunks.
        print(f"API调用错误: {e}")
        return "", time.time() - start_time

def map_chunk(chunk, prompt_template):
    """Map step: summarize a single text chunk.

    Returns:
        (summary, elapsed) as produced by :func:`generate_summary`.
    """
    outcome = generate_summary(chunk, prompt_template, max_tokens=200)
    return outcome

async def async_generate_summary(text, prompt_template, max_tokens=300, temperature=0.3):
    """Async variant of :func:`generate_summary`.

    Runs the synchronous ``quick_chat`` call in the event loop's default
    executor so many chunk summaries can overlap.

    Args:
        text: input text to summarize.
        prompt_template: template containing a ``{text}`` placeholder.
        max_tokens: cap on generated tokens.
        temperature: sampling temperature.

    Returns:
        (summary, elapsed): the summary string (empty on failure) and the
        seconds spent in the API call.
    """
    prompt = prompt_template.format(text=text)
    start_time = time.time()

    try:
        # Select endpoint/model once; only SiliconFlow needs an API key.
        kwargs = {
            "system_prompt": "You are a helpful assistant",
            "user_prompt": prompt,
            "max_tokens": max_tokens,
            "temperature": temperature,
        }
        if USE_SILICONFLOW:
            kwargs.update(
                base_url=LLM_CONFIG["BASE_URL_dev"],
                model=LLM_CONFIG["MODEL_dev"],
                api_key=LLM_CONFIG["API_KEY"],
            )
        else:
            kwargs.update(
                base_url=LLM_CONFIG["BASE_URL_1238"],
                model=LLM_CONFIG["MODEL_14B"],
            )

        # get_running_loop() is the supported way to obtain the loop inside
        # a coroutine (get_event_loop() is deprecated here).  Using the
        # loop's default executor (None) avoids building and tearing down a
        # fresh ThreadPoolExecutor on every call.
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(None, lambda: quick_chat(**kwargs))

        return result.text.strip(), time.time() - start_time
    except Exception as e:
        # Best-effort: report the failure and return an empty summary so the
        # MapReduce pipeline can continue with the remaining chunks.
        print(f"API调用错误: {e}")
        return "", time.time() - start_time

async def async_map_chunk(chunk, prompt_template, chunk_index):
    """Async map step: summarize one chunk, tagging the result with its index.

    Returns:
        (chunk_index, summary, elapsed_seconds)
    """
    text_summary, elapsed = await async_generate_summary(
        chunk, prompt_template, max_tokens=200
    )
    return chunk_index, text_summary, elapsed

def map_reduce_summarize(text, target_length=500, return_timing=False):
    """Summarize long text with a MapReduce pipeline.

    Short texts are summarized in a single call; longer texts are split into
    chunks, summarized in parallel (map), and the partial summaries are
    merged (reduce), recursively if the merged result is still too long.

    Args:
        text: the long input text.
        target_length: desired summary length in characters.
        return_timing: whether to also return per-stage timing statistics.

    Returns:
        The summary string if ``return_timing`` is False, otherwise the
        tuple ``(summary, timing_dict)``.
    """
    total_start_time = time.time()
    timing = {
        "split": 0.0,
        "map": 0.0,
        "reduce": 0.0,
        "recursive_reduce": 0.0,
        "total": 0.0,
    }

    # Step 1: short texts fit in a single model call — no map phase needed.
    if len(text) <= CHUNK_SIZE:
        prompt = "请生成以下文本的简洁摘要：\n\n{text}"
        summary, reduce_time = generate_summary(text, prompt, max_tokens=target_length // 4)
        timing["reduce"] = reduce_time
        timing["total"] = time.time() - total_start_time
        return (summary, timing) if return_timing else summary

    # Step 2: split into fixed-size chunks.
    chunks, split_time = split_text(text)
    timing["split"] = split_time
    print(f"拆分成 {len(chunks)} 个块，耗时: {split_time:.2f}秒")

    # Step 3: map phase — summarize all chunks concurrently.
    map_prompt = "请对以下内容生成简洁的摘要，突出要点：\n\n{text}"
    partial_summaries = []
    map_start_time = time.time()

    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        # Submit all chunks, remembering each future's original position.
        future_to_chunk = {
            executor.submit(map_chunk, chunk, map_prompt): i
            for i, chunk in enumerate(chunks)
        }

        # Collect as they complete; failed or empty chunks are skipped so a
        # single bad API call does not abort the whole job.
        for future in as_completed(future_to_chunk):
            chunk_index = future_to_chunk[future]
            try:
                summary, chunk_time = future.result()
                if summary:
                    partial_summaries.append((chunk_index, summary))
                    print(f"块 {chunk_index} 摘要: {summary[:50]}... 耗时: {chunk_time:.2f}秒")
            except Exception as e:
                print(f"块 {chunk_index} 处理失败: {e}")

    # Restore original chunk order (as_completed yields out of order).
    partial_summaries.sort(key=lambda x: x[0])
    summaries_text = [summary for _, summary in partial_summaries]

    timing["map"] = time.time() - map_start_time
    print(f"Map阶段总耗时: {timing['map']:.2f}秒")

    # Step 4: reduce phase — merge the partial summaries into one.
    combined_summaries = "\n\n".join(summaries_text)
    reduce_prompt = "请将以下多个摘要合并成一个流畅的最终摘要，长度控制在{length}字左右：\n\n{text}"
    final_summary, reduce_time = generate_summary(
        combined_summaries,
        reduce_prompt.format(length=target_length, text="{text}"),
        max_tokens=target_length // 2,
    )
    timing["reduce"] = reduce_time
    print(f"Reduce阶段耗时: {timing['reduce']:.2f}秒")

    # Step 5: recursive reduce when the merged summary is still too long.
    # The inner call's timing dict was always discarded, so a plain
    # return_timing=False call replaces the previous dead branching.
    if len(final_summary) > target_length * 1.5:
        print("最终摘要过长，进行递归Reduce...")
        recursive_start_time = time.time()
        final_summary = map_reduce_summarize(final_summary, target_length)
        timing["recursive_reduce"] = time.time() - recursive_start_time

    timing["total"] = time.time() - total_start_time
    print(f"摘要处理总耗时: {timing['total']:.2f}秒")

    return (final_summary, timing) if return_timing else final_summary

async def async_map_reduce_summarize(text, target_length=500, return_timing=False):
    """Async version of :func:`map_reduce_summarize`.

    Uses ``asyncio.gather`` over :func:`async_map_chunk` for the map phase
    instead of a thread pool managed here directly.

    Args:
        text: the long input text.
        target_length: desired summary length in characters.
        return_timing: whether to also return per-stage timing statistics.

    Returns:
        The summary string if ``return_timing`` is False, otherwise the
        tuple ``(summary, timing_dict)``.
    """
    total_start_time = time.time()
    timing = {
        "split": 0.0,
        "map": 0.0,
        "reduce": 0.0,
        "recursive_reduce": 0.0,
        "total": 0.0,
    }

    # Step 1: short texts fit in a single model call — no map phase needed.
    if len(text) <= CHUNK_SIZE:
        prompt = "请生成以下文本的简洁摘要：\n\n{text}"
        summary, reduce_time = await async_generate_summary(text, prompt, max_tokens=target_length // 4)
        timing["reduce"] = reduce_time
        timing["total"] = time.time() - total_start_time
        return (summary, timing) if return_timing else summary

    # Step 2: split into fixed-size chunks.
    chunks, split_time = split_text(text)
    timing["split"] = split_time
    print(f"拆分成 {len(chunks)} 个块，耗时: {split_time:.2f}秒")

    # Step 3: map phase — summarize all chunks concurrently.
    map_prompt = "请对以下内容生成简洁的摘要，突出要点：\n\n{text}"
    map_start_time = time.time()

    # One task per chunk; gather preserves submission order and (with
    # return_exceptions=True) reports per-chunk failures as values.
    tasks = [async_map_chunk(chunk, map_prompt, i) for i, chunk in enumerate(chunks)]
    results = await asyncio.gather(*tasks, return_exceptions=True)

    # Keep only successful, non-empty summaries; use the chunk index that
    # the task itself carries rather than relying on list position.
    partial_summaries = []
    for i, result in enumerate(results):
        if isinstance(result, Exception):
            print(f"块 {i} 处理失败: {result}")
        elif result and result[1]:  # result == (chunk_index, summary, elapsed)
            partial_summaries.append((result[0], result[1]))
            print(f"块 {i} 摘要: {result[1][:50]}... 耗时: {result[2]:.2f}秒")

    # Restore original chunk order.
    partial_summaries.sort(key=lambda x: x[0])
    summaries_text = [summary for _, summary in partial_summaries]

    timing["map"] = time.time() - map_start_time
    print(f"Map阶段总耗时: {timing['map']:.2f}秒")

    # Step 4: reduce phase — merge the partial summaries into one.
    combined_summaries = "\n\n".join(summaries_text)
    reduce_prompt = "请将以下多个摘要合并成一个流畅的最终摘要，长度控制在{length}字左右：\n\n{text}"
    final_summary, reduce_time = await async_generate_summary(
        combined_summaries,
        reduce_prompt.format(length=target_length, text="{text}"),
        max_tokens=target_length // 2,
    )
    timing["reduce"] = reduce_time
    print(f"Reduce阶段耗时: {timing['reduce']:.2f}秒")

    # Step 5: recursive reduce when the merged summary is still too long.
    # The inner call's timing dict was always discarded, so a plain
    # return_timing=False call replaces the previous dead branching.
    if len(final_summary) > target_length * 1.5:
        print("最终摘要过长，进行递归Reduce...")
        recursive_start_time = time.time()
        final_summary = await async_map_reduce_summarize(final_summary, target_length)
        timing["recursive_reduce"] = time.time() - recursive_start_time

    timing["total"] = time.time() - total_start_time
    print(f"摘要处理总耗时: {timing['total']:.2f}秒")

    return (final_summary, timing) if return_timing else final_summary

# Example usage / smoke test
if __name__ == "__main__":
    # Simulate a ~200k-character text
    long_text = """
    近年来，人工智能技术发展迅速，特别是在自然语言处理领域取得了突破性进展。大型语言模型如GPT、BERT等的出现，极大地推动了机器对人类语言理解和生成能力的提升。
    在技术层面，Transformer架构的提出是一个重要里程碑。它通过自注意力机制，能够更好地捕捉文本中的长距离依赖关系。这种架构不仅在机器翻译任务上表现出色，在文本摘要、问答系统、对话生成等多个NLP任务上也展现了强大的能力。
    从应用角度来看，AI技术已经深入到各个行业。在医疗领域，AI辅助诊断系统能够帮助医生更准确地识别疾病；在金融领域，智能风控系统可以实时监测交易风险；在教育领域，个性化学习系统为学生提供定制化的学习体验。
    然而，AI技术的快速发展也带来了一些挑战。数据隐私保护、算法偏见、就业影响等问题需要我们认真思考和应对。如何在享受AI技术带来便利的同时，确保其安全、公平、可持续发展，是当前面临的重要课题。
    展望未来，AI技术将继续快速发展。多模态AI、量子计算与AI的结合、AI伦理治理等将成为重要发展方向。我们需要在技术创新与社会责任之间找到平衡，推动AI技术更好地服务人类社会。
    """ * 400  # repeat 400 times, roughly 200k characters
    long_text = long_text[:200000]  # truncate to exactly 200k characters
    
    try:
        # Exercise the synchronous pipeline with timing enabled
        print("=== 测试同步版本 ===")
        result, timing = map_reduce_summarize(long_text, target_length=500, return_timing=True)
        print("\n最终摘要：\n", result)
        print("\n时间统计：")
        print(f"  文本拆分: {timing['split']:.2f}秒")
        print(f"  Map 阶段: {timing['map']:.2f}秒")
        print(f"  Reduce 阶段: {timing['reduce']:.2f}秒")
        print(f"  递归 Reduce: {timing['recursive_reduce']:.2f}秒")
        print(f"  总耗时: {timing['total']:.2f}秒")
        print(f"原文长度: {len(long_text)} 字符")
        print(f"摘要长度: {len(result)} 字符")
        print(f"压缩比: {len(result) / len(long_text):.1%}")
        
        # Exercise the asyncio-based pipeline on the same input
        print("\n=== 测试异步版本 ===")
        async def test_async():
            result_async = await async_map_reduce_summarize(long_text, target_length=500)
            print(f"异步版本摘要长度: {len(result_async)} 字符")
            return result_async
        
        result_async = asyncio.run(test_async())
        
    except Exception as e:
        print(f"处理失败: {e}")