from vllm import LLM, SamplingParams
import os
import torch
import inspect
from typing import Callable, Optional
from pathos.multiprocessing import ProcessingPool as Pool
from functools import partial
import math
import itertools

from loguru import logger

import sys
sys.path.append('/mnt/user/linzhixin/github/tools')
from utils.utils import is_equal_len, sep_datas_list_equally

def naive_vllm_infer(prompt: str,
                     model: str,
                     tensor_parallel_size: int = 2,
                     gpu_memory_utilization: float = 0.3,
                     max_tokens: int = 100) -> list:
    """Run a one-off deterministic vLLM generation for a single prompt.

    Builds a fresh ``LLM`` engine on every call — expensive, intended for
    quick experiments only. For batched / reusable inference use
    ``MultiProcessVllmRunner`` below.

    Args:
        prompt: fully formatted prompt string.
        model: model path or name to load.
        tensor_parallel_size: GPUs used for tensor parallelism
            (default 2, the previously hard-coded value).
        gpu_memory_utilization: fraction of GPU memory vLLM may claim
            (default 0.3, the previously hard-coded value).
        max_tokens: generation length cap (default 100, as before).

    Returns:
        The list returned by ``LLM.generate`` (one ``RequestOutput`` for
        the single prompt). The original annotation claimed ``str`` but
        the function has always returned the raw outputs; the annotation
        is corrected here rather than changing runtime behavior.
    """

    vllm_engine = LLM(
                model=model,
                tensor_parallel_size=tensor_parallel_size,
                trust_remote_code=True,
                enable_prefix_caching=True,
                gpu_memory_utilization=gpu_memory_utilization
            )

    # Fully deterministic decoding: temperature=0, top_p=1, fixed seed.
    sampling_params = SamplingParams(
            n=1,
            best_of=1,
            presence_penalty=0.0,
            frequency_penalty=0.0,
            repetition_penalty=1.0,
            temperature=0.0,
            top_p=1,
            top_k=-1,
            min_p=0.0,
            seed=42,
            use_beam_search=False,
            length_penalty=1.0,
            early_stopping=False,
            stop=[],
            # NOTE(review): stopping on token id 0 looks model-specific — confirm
            stop_token_ids=[0],
            include_stop_str_in_output=False,
            ignore_eos=False,
            max_tokens=max_tokens,
            min_tokens=1,
            logprobs=3,
            # logits_processors=[mask_unlabel_tokens],
            detokenize=True,
            prompt_logprobs=None,
            skip_special_tokens=False,
            spaces_between_special_tokens=True,
            truncate_prompt_tokens=None,
        )

    outputs = vllm_engine.generate(
                    [prompt],
                    sampling_params,
                    use_tqdm=False,
                )
    return outputs




"""
    多进程vllm推理
    仅支持单模型
    支持多数据源，支持单输出or多输出路径(要与数据源数量对齐)
"""

import os
import torch
from pathos.multiprocessing import ProcessingPool as Pool
from vllm import LLM, SamplingParams
from tqdm import tqdm
from dataclasses import dataclass

@dataclass
class VllmConfig:
    """Per-process vLLM settings: engine kwargs, sampling kwargs, GPU id.

    The previous version combined ``@dataclass`` with a hand-written
    ``__init__``, so no fields were registered (the generated ``__eq__``
    compared all instances equal) and ``vllm_config`` was assigned twice.
    Declaring real fields keeps the same constructor signature while
    restoring a useful generated ``__repr__``/``__eq__``.
    """
    vllm_config: dict      # kwargs forwarded to vllm.LLM(**vllm_config)
    sampling_params: dict  # kwargs forwarded to SamplingParams(**sampling_params)
    device: int            # CUDA device index (int) or id string for this worker
    
        
class VllmManager:   
    def __init__(self,
                 vllm_config: VllmConfig):
        self.vllm_config = vllm_config
        self.vllm_engine = None 
        self.sampling_params = None
    
    def get_device(self) -> int:
        return self.vllm_config.device
        
    def get_generator(self):
        # print('get_generator from ',self.vllm_config)
        self._init()
        return self.vllm_engine, self.sampling_params
    
    def _set_device(self):
        device = self.get_device()
        if isinstance(device, int):
            device = str(device)
        elif isinstance(device, str):
            pass
        else:
            raise ValueError(f'wrong device type. get {device}.')
        import os 
        logger.info(f'set device: {device}')
        os.environ["CUDA_VISIBLE_DEVICES"] = str(device)
            
    def _init(self):
        if self.vllm_engine == None: 
            logger.info('初始化vllm_engine')
            self._set_device()
            vllm_config = self.vllm_config.vllm_config
            logger.info(f'vllm_config:{vllm_config}')
            self.vllm_engine = LLM(**vllm_config)
            
        if self.sampling_params == None:
            logger.info('初始化sampling_params')
            self.sampling_params = SamplingParams(**self.vllm_config.sampling_params)
        
    def generate(self, 
                 prompt: str | list[str],
                 **kwargs):
        self._init()
        result = self.vllm_engine(prompt, 
                         sampling_params=self.sampling_params,
                         **kwargs)
        return result
        
class MultiProcessVllmRunner:
    """Fan prompts out across several worker processes, one vLLM engine each.

    Each process owns a ``VllmManager`` pinned to its own GPU; :meth:`run`
    splits the input data evenly across processes and concatenates the
    per-process results in order.
    """

    # Template engine kwargs; copied (never mutated) before per-call overrides.
    default_vllm_config: dict = dict(
                tensor_parallel_size=1,
                trust_remote_code=True,
                enable_prefix_caching=True,
                gpu_memory_utilization=0.5
            )

    # Template SamplingParams kwargs: deterministic decoding, 1 new token.
    default_sampling_params: dict = dict(
                n=1,
                best_of=1,
                presence_penalty=0.0,
                frequency_penalty=0.0,
                repetition_penalty=1.0,
                temperature=0.0,
                top_p=1,
                top_k=-1,
                min_p=0.0,
                seed=42,
                stop=[],
                stop_token_ids=[0],
                include_stop_str_in_output=False,
                ignore_eos=False,
                max_tokens=1,
                min_tokens=0,
                logprobs=3,
                detokenize=True,
                prompt_logprobs=None,
                skip_special_tokens=False,
                spaces_between_special_tokens=True,
                truncate_prompt_tokens=None,
            )

    def __init__(self,
                 multi_vllm_config: list['VllmConfig']):
        """Create one worker process / manager per entry of ``multi_vllm_config``."""
        self.num_process = len(multi_vllm_config)
        self.device_list = [mvc.device for mvc in multi_vllm_config]
        self.multi_vllm_manager = [VllmManager(mvc)
                                   for mvc in multi_vllm_config]

    def run(self,
            split: Optional[list[list]] = None,
            constant: Optional[list] = None,
            func: Optional[Callable] = None,
            use_tqdm: bool = False) -> list:
        """Distribute the data across the worker pool and run inference.

        Args:
            split: list of data sources to be split across processes, e.g.
                ``[prompt_list, ground_truth_list]``. Each source list must
                have the same length.
            constant: extra data passed unchanged to every process (only
                used when ``func`` is given).
            func: optional custom per-process inference function called as
                ``func(per_process_split, constant, manager, use_tqdm)``.
            use_tqdm: forwarded to the custom ``func``.

        Returns:
            The per-process result lists concatenated in process order.
        """
        # Fix: avoid mutable default arguments ([[]] and [] were shared
        # across calls in the original signature).
        split = [[]] if split is None else split
        constant = [] if constant is None else constant
        # sep_datas_list_equally returns, per data source, a list of
        # num_process chunks; transpose it so each process receives one
        # entry holding its chunk of every source.
        datas_list_list = sep_datas_list_equally(split, self.num_process)
        per_process = [[datas_list[i] for datas_list in datas_list_list]
                       for i in range(self.num_process)]
        if func:
            with Pool(self.num_process) as pool:
                results_list = pool.map(func,
                                        per_process,
                                        [constant] * self.num_process,
                                        self.multi_vllm_manager,
                                        [use_tqdm] * self.num_process)
        else:
            # Bug fix: the original mapped `_run_func` over the
            # un-transposed source-major list and supplied four argument
            # lists to a two-parameter method, so `constant` arrived as
            # the manager. The default path supports a single data source
            # (a list of prompts).
            prompt_chunks = [chunks[0] if chunks else []
                             for chunks in per_process]
            with Pool(self.num_process) as pool:
                results_list = pool.map(self._run_func,
                                        prompt_chunks,
                                        self.multi_vllm_manager)
        return list(itertools.chain(*results_list))

    def _run_func(self,
                  datas_list: Optional[list[str]],
                  manager: 'VllmManager') -> list:
        """Default per-process worker: generate over this chunk of prompts."""
        if not datas_list:
            return []
        vllm_engine, sampling_params = manager.get_generator()
        return vllm_engine.generate(datas_list,
                                    sampling_params=sampling_params)

    @classmethod
    def get_multiprocess_vllm(cls,
                            n: int = 1,
                            device_list: Optional[list[int]] = None,
                            vllm_config: Optional[dict] = None,
                            sampling_params: Optional[dict] = None,
                            **kwargs
                            ):
        """Build a runner from the class defaults plus per-call overrides.

        Args:
            n: number of worker processes.
            device_list: GPU index per process; defaults to ``range(n)``.
            vllm_config: overrides merged over ``default_vllm_config``.
            sampling_params: overrides merged over ``default_sampling_params``.
            **kwargs: ``multi_vllm_config`` may supply per-process engine
                configs (a list of dicts, or one dict replicated ``n``
                times). Any other keyword (e.g. ``model=...``) is merged
                into the engine config — the original silently dropped
                these even though ``__main__`` passes ``model=``.

        Raises:
            ValueError: on inconsistent config/device counts or when
                neither ``n`` nor ``device_list`` is usable.
        """
        vllm_config = {} if vllm_config is None else vllm_config
        sampling_params = {} if sampling_params is None else sampling_params
        # Merge into *copies*: the original update_config wrote the
        # overrides into the class-level default dicts, corrupting them
        # for every later call.
        extra_engine_kwargs = {k: v for k, v in kwargs.items()
                               if k != 'multi_vllm_config'}
        vllm_config = {**cls.default_vllm_config,
                       **vllm_config,
                       **extra_engine_kwargs}
        sampling_params = {**cls.default_sampling_params, **sampling_params}

        _multi_vllm_config = kwargs.get('multi_vllm_config', vllm_config)
        if isinstance(_multi_vllm_config, list) and \
                all(isinstance(mvc, dict) for mvc in _multi_vllm_config):
            pass
        elif isinstance(_multi_vllm_config, dict):
            _multi_vllm_config = [_multi_vllm_config] * n
        elif not _multi_vllm_config:
            _multi_vllm_config = [dict()] * n
        else:
            raise ValueError('错误')

        if n and not device_list:
            device_list = list(range(n))
        elif not n and device_list:
            n = len(device_list)
        elif n and device_list:
            if n != len(device_list):
                logger.warning(f'MultiProcessVllmRunner 接收到n={n}, device_list={device_list}，长度不同。'
                               '将以n为标准进行推理。')
                device_list = list(range(n))
        else:
            raise ValueError(f'必须提供n或者device_list！')

        if len(_multi_vllm_config) != len(device_list):
            raise ValueError('设定的vllm_config数量与多进程数量不匹配，'
                             f'得到{len(_multi_vllm_config)}个和{len(device_list)}个')
        multi_vllm_config = [VllmConfig(cfg, sampling_params, device)
                             for cfg, device in zip(_multi_vllm_config, device_list)]
        return cls(multi_vllm_config)
        
if __name__ == '__main__':

    # Smoke test: two pre-templated Qwen chat prompts, run on two workers.
    prompt = [
        '<|im_start|>user\n月亮为何有时圆有时弯？<|im_end|><|im_start|>assistant\n因为我们看到月亮被太阳照亮的部分在变化。',
        '<|im_start|>user\n游戏王中“黑暗大法师”是由多少张卡片组成的？<|im_end|><|im_start|>assistant\n黑暗大法师”是由5张卡片组成的。'
    ]

    # Two processes, pinned to GPUs 0 and 3.
    multi_vllm_runner = MultiProcessVllmRunner.get_multiprocess_vllm(
        n=2,
        device_list=[0, 3],
        model='/mnt/public/open_source_model/Qwen2.5/Qwen2.5-3B-Instruct'
    )

    multi_vllm_runner.run([prompt])