import concurrent.futures
import asyncio
import inspect
import copy
import time
from typing import Any, Callable, Dict, List, Tuple, Union, Optional

from llmlingua import PromptCompressor

from agents.two_stage_outline_agent import two_stage_generate_outline
from my_utils.data_io import load_files


def parallel_compressor(
        uncompressed_text_list: List[str],
        compression_rate: float = 0.7,
        model_path: str = "./bert-base-multilingual-cased",
        max_workers: Optional[int] = None
) -> List[str]:
    """Compress a list of texts in parallel with LLMLingua-2.

    Args:
        uncompressed_text_list: texts to compress.
        compression_rate: target rate passed to LLMLingua-2.
        model_path: path of the token-classification model backing LLMLingua-2.
        max_workers: thread-pool size; None lets ThreadPoolExecutor decide.

    Returns:
        The compressed texts, in the same order as the input list.
    """
    # Build the compressor once and share it across all worker threads,
    # instead of paying the model-loading cost per text.
    shared_compressor = PromptCompressor(
        model_name=model_path,
        use_llmlingua2=True
    )

    def _compress_one(text: str) -> str:
        """Run LLMLingua-2 on a single text and return the compressed prompt."""
        outcome = shared_compressor.compress_prompt_llmlingua2(
            text,
            rate=compression_rate,
            force_tokens=['\n', '.', '!', '?', ',', '，', '。', '！', '？'],
            chunk_end_tokens=['.', '\n', '。', '！', '？'],
            return_word_label=True,
            drop_consecutive=True
        )
        return outcome['compressed_prompt']

    # Executor.map yields results in submission order, so no index
    # bookkeeping is needed to keep outputs aligned with inputs.
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
        return list(pool.map(_compress_one, uncompressed_text_list))



def run_async_in_thread(async_func: Callable, *args, **kwargs):
    """Drive *async_func* to completion on a fresh event loop in this thread.

    asyncio.run creates a brand-new event loop (and closes it afterwards),
    which makes this safe to call from worker threads that have no loop.
    """
    coroutine = async_func(*args, **kwargs)
    return asyncio.run(coroutine)

def parallel_compressing_process_with_another(
    uncompressed_text_list: List[str],
    parallel_function: Callable,
    parallel_args: List[Any] = None,
    parallel_kwargs: Dict[str, Any] = None,
    compression_rate: float = 0.7,
    model_path: str = "./bert-base-multilingual-cased",
    max_workers: Optional[int] = None,
) -> Tuple[Any, List[str]]:
    """
    Run a caller-supplied function and the compression job concurrently
    (handles both synchronous and asynchronous parallel_function).

    Args:
        uncompressed_text_list: texts to compress.
        parallel_function: function executed alongside compression
            (may be sync or async).
        parallel_args: positional arguments for parallel_function.
        parallel_kwargs: keyword arguments for parallel_function.
        compression_rate: compression rate for LLMLingua-2.
        model_path: model path for the compressor.
        max_workers: maximum worker threads for the whole parallel process
            (the outer pool AND the inner compression pool).

    Returns:
        (result of parallel_function, list of compressed texts)
    """
    # None-defaults instead of mutable defaults shared across calls.
    if parallel_args is None:
        parallel_args = []
    if parallel_kwargs is None:
        parallel_kwargs = {}

    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        # A coroutine function cannot be invoked directly in a worker thread:
        # wrap it so it runs to completion on its own fresh event loop.
        if inspect.iscoroutinefunction(parallel_function):
            future_function = executor.submit(
                run_async_in_thread, parallel_function, *parallel_args, **parallel_kwargs
            )
        else:
            # Synchronous function: submit as-is.
            future_function = executor.submit(parallel_function, *parallel_args, **parallel_kwargs)

        # parallel_compressor is synchronous. Forward max_workers so the
        # documented limit actually applies to the compression pool too
        # (previously it was dropped and the pool used its default size).
        future_compress = executor.submit(
            parallel_compressor,
            uncompressed_text_list=uncompressed_text_list,
            compression_rate=compression_rate,
            model_path=model_path,
            max_workers=max_workers,
        )

        # Block until both tasks finish; an exception raised by either task
        # propagates to the caller from the corresponding .result() call.
        function_result = future_function.result()
        compressed_texts = future_compress.result()

    return function_result, compressed_texts



# Example entry point
if __name__ == "__main__":
    topic = "Latest Advances and Cross-modal Fusion Strategies in Multimodal Learning"
    user_summary_type = "A review of the current research status in a specific field"

    origin_result = load_files(["origin_paper_search_result.json"], root_path="../examples_data")[0]

    # Extract the raw chunk texts once; both compression runs below use the
    # exact same input.
    chunk_texts = [item['entity']['chunk_text'] for item in origin_result.values()]

    # Standalone compression run, timed for reference.
    started_at = time.time()
    compressed_texts = parallel_compressor(
        uncompressed_text_list=chunk_texts,
        compression_rate=0.7,
        model_path="../bert-base-multilingual-cased"
    )
    finished_at = time.time()
    print(f"压缩用时：{finished_at - started_at} s")

    # Run outline generation and compression concurrently.
    (detail_outline, paper_index_dict), compressed_texts = parallel_compressing_process_with_another(
        uncompressed_text_list=chunk_texts,
        parallel_function=two_stage_generate_outline,
        parallel_args=[topic, user_summary_type, origin_result],
        compression_rate=0.7,
        model_path="../bert-base-multilingual-cased",
    )

    # Write the compressed texts back into a deep copy of the original
    # search result, keyed by the original paper ids.
    compressed_result = copy.deepcopy(origin_result)
    for paper_id, compressed_text in zip(origin_result.keys(), compressed_texts):
        compressed_result[paper_id]['entity']['chunk_text'] = compressed_text