# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import os
import base64
from typing import List, Dict, Optional, Any
from vllm import LLM, SamplingParams
from PIL import Image
import io

class Reward():
    """Container for a scalar value estimate produced by a reward model."""

    # Class-level default retained for backward compatibility with any code
    # that reads `Reward.value_estimate` on the class itself.
    value_estimate: float = 0

    def __init__(self, value_estimate: float):
        """Store the scalar value estimate."""
        self.value_estimate = value_estimate

    def __repr__(self) -> str:
        # Added for debuggability; does not affect existing callers.
        return f"Reward(value_estimate={self.value_estimate!r})"

class OpenAIRequestOutput:
    """Adapter mimicking vLLM's ``RequestOutput`` interface for OpenAI results."""

    def __init__(self, outputs: List['OpenAICompletionOutput']):
        """Hold the list of completion outputs for a single request.

        Args:
            outputs: completion outputs produced for this request.
        """
        self.outputs = outputs

class OpenAICompletionOutput:
    """Adapter mimicking vLLM's ``CompletionOutput`` interface for OpenAI results."""

    def __init__(self, text: str, stop_reason: Optional[str] = None):
        """Hold one generated completion.

        Args:
            text: the generated text.
            stop_reason: why generation stopped; a missing/falsy value is
                normalized to "stop".
        """
        self.text = text
        self.stop_reason = stop_reason if stop_reason else "stop"

def _encode_image_to_base64(image_path: str, max_size: Optional[List[int]] = None) -> str:
    """Encode an image file as a base64 JPEG data URI.

    Args:
        image_path: path to the image file.
        max_size: maximum [width, height]; images exceeding either dimension
            are downscaled in place preserving aspect ratio. Defaults to
            [1024, 1024].

    Returns:
        A ``data:image/jpeg;base64,...`` string.

    Raises:
        ValueError: if the image cannot be opened or processed.
    """
    # Avoid a shared mutable default argument (previously `max_size=[1024, 1024]`).
    if max_size is None:
        max_size = [1024, 1024]
    try:
        with Image.open(image_path) as img:
            # Downscale only when the image exceeds the limit in either dimension.
            if img.size[0] > max_size[0] or img.size[1] > max_size[1]:
                img.thumbnail(max_size, Image.Resampling.LANCZOS)

            # JPEG cannot encode e.g. RGBA/P modes; normalize to RGB first.
            if img.mode != 'RGB':
                img = img.convert('RGB')

            # Encode to base64 via an in-memory buffer.
            buffer = io.BytesIO()
            img.save(buffer, format='JPEG', quality=95)
            image_base64 = base64.b64encode(buffer.getvalue()).decode('utf-8')

            return f"data:image/jpeg;base64,{image_base64}"
    except Exception as e:
        # Chain the original exception so the root cause stays visible.
        raise ValueError(f"无法处理图像 {image_path}: {str(e)}") from e

def _create_multimodal_message(prompt: str, multimodal_data: Optional[Dict] = None, 
                              config = None) -> List[Dict]:
    """
    创建多模态消息格式
    
    Args:
        prompt: 文本提示
        multimodal_data: 多模态数据
        config: 配置对象
        
    Returns:
        OpenAI API格式的消息列表
    """
    if not multimodal_data or not config or not config.multimodal_enabled:
        # 纯文本模式
        return [{"role": "user", "content": prompt}]
    
    # 多模态模式
    content = []
    
    # 添加文本内容
    content.append({"type": "text", "text": prompt})
    
    # 添加图像内容
    if config.image_field in multimodal_data:
        image_path = multimodal_data[config.image_field]
        
        # 处理相对路径
        if not os.path.isabs(image_path):
            image_path = os.path.join(config.image_root, image_path)
        
        if os.path.exists(image_path):
            image_base64 = _encode_image_to_base64(image_path, config.max_image_size)
            content.append({
                "type": "image_url",
                "image_url": {"url": image_base64}
            })
    
    return [{"role": "user", "content": content}]

def _openai_generate_batch(client, messages_list: List[List[Dict]], sampling_params, 
                          batch_size: int) -> List[OpenAIRequestOutput]:
    """Generate completions for a batch of chat messages via the OpenAI API.

    Each message list may need several API calls because the provider caps
    the number of completions per request (``max_n_per_request``).

    Args:
        client: OpenAI client exposing ``chat.completions.create``.
        messages_list: one chat-message list per prompt.
        sampling_params: object carrying ``model``, ``temperature``,
            ``max_tokens``, ``n``, ``max_n_per_request`` and optionally ``stop``.
        batch_size: accepted for interface compatibility; not used here.

    Returns:
        One ``OpenAIRequestOutput`` per entry in ``messages_list``.
    """
    results: List[OpenAIRequestOutput] = []

    for messages in messages_list:
        collected: List[OpenAICompletionOutput] = []
        n_left = sampling_params.n

        while n_left > 0:
            # Respect the per-request completion cap.
            n_now = min(n_left, sampling_params.max_n_per_request)

            try:
                request_kwargs = {
                    "model": sampling_params.model,
                    "messages": messages,
                    "temperature": sampling_params.temperature,
                    "max_tokens": sampling_params.max_tokens,
                    "n": n_now
                }

                # Only forward a stop list when one is configured.
                if getattr(sampling_params, 'stop', None):
                    request_kwargs["stop"] = sampling_params.stop

                response = client.chat.completions.create(**request_kwargs)

                # Convert each choice into the vLLM-compatible shape.
                collected.extend(
                    OpenAICompletionOutput(
                        text=choice.message.content,
                        stop_reason=choice.finish_reason
                    )
                    for choice in response.choices
                )

            except Exception as e:
                # Best-effort: record the failure as an error completion
                # instead of aborting the whole batch.
                collected.append(OpenAICompletionOutput(
                    text=f"Error: {str(e)}",
                    stop_reason="error"
                ))

            # Both the success and error paths consume this chunk's budget.
            n_left -= n_now

        results.append(OpenAIRequestOutput(outputs=collected))

    return results

def _assert_output_compat(outputs: List) -> None:
    """
    验证输出格式兼容性
    
    Args:
        outputs: 输出列表
        
    Raises:
        AssertionError: 如果输出格式不兼容
    """
    for output in outputs:
        assert hasattr(output, 'outputs'), "输出对象必须有 'outputs' 属性"
        for completion in output.outputs:
            assert hasattr(completion, 'text'), "完成对象必须有 'text' 属性"
            assert hasattr(completion, 'stop_reason'), "完成对象必须有 'stop_reason' 属性"

def _dedupe_completions(outputs) -> None:
    """Remove duplicate completion texts in place, keeping first-seen order."""
    for output in outputs:
        seen_texts = set()
        unique = []
        for out in output.outputs:
            if out.text not in seen_texts:
                unique.append(out)
                seen_texts.add(out.text)
        output.outputs = unique


def _keep_first_final_answer(outputs) -> None:
    """If any completion stopped at "<end_of_answer>", keep only the first such one (in place)."""
    for output in outputs:
        end_ans = [o for o in output.outputs if o.stop_reason == "<end_of_answer>"]
        if end_ans:
            output.outputs = end_ans[:1]
        else:
            output.outputs = [o for o in output.outputs if o.stop_reason != "<end_of_answer>"]


def llm_generate(
    prompts: List[str],
    sampling_params,
    engine,
    multimodal_data: Optional[List[Dict]] = None,
):
    """Unified generation entry point supporting vLLM and the OpenAI API.

    Supports MCTSCaption batch optimization: a multimodal_data entry may carry
    '_caption_samples' to request a per-prompt sample count (vLLM mode only).

    Args:
        prompts: list of prompt strings.
        sampling_params: sampling parameters; OpenAI mode is assumed when the
            object exposes a ``model`` attribute.
        engine: inference engine; OpenAI mode additionally requires the engine
            to expose ``chat`` (an OpenAI client).
        multimodal_data: optional per-prompt multimodal payloads.

    Returns:
        A list of request outputs, one per prompt. In vLLM mode duplicate
        completion texts are dropped and, when any completion stopped at
        "<end_of_answer>", only the first such completion is kept.
    """
    if not prompts:
        return []

    # OpenAI API mode: params carry a model name and the engine is a client.
    if hasattr(sampling_params, 'model') and hasattr(engine, 'chat'):
        # Configuration object attached to the client by the engine setup
        # (may be absent, in which case messages are plain text).
        config = getattr(engine, '_config', None)

        messages_list = []
        for i, prompt in enumerate(prompts):
            mm_data = multimodal_data[i] if multimodal_data and i < len(multimodal_data) else None
            messages_list.append(_create_multimodal_message(prompt, mm_data, config))

        outputs = _openai_generate_batch(engine, messages_list, sampling_params, len(prompts))
        _assert_output_compat(outputs)
        return outputs

    # vLLM mode: check whether any prompt requests its own sample count.
    need_dynamic_sampling = any(
        isinstance(mm_data, dict) and '_caption_samples' in mm_data
        for mm_data in (multimodal_data or [])
    )

    if need_dynamic_sampling:
        # vLLM has no per-prompt `n`, so each prompt is issued separately.
        outputs = []
        for i, prompt in enumerate(prompts):
            mm_data = multimodal_data[i] if i < len(multimodal_data) else None

            if isinstance(mm_data, dict) and '_caption_samples' in mm_data:
                # SamplingParams comes from the module-level vllm import.
                dynamic_params = SamplingParams(
                    temperature=sampling_params.temperature,
                    top_k=sampling_params.top_k,
                    top_p=sampling_params.top_p,
                    max_tokens=sampling_params.max_tokens,
                    n=mm_data['_caption_samples'],  # per-prompt sample count
                    stop=sampling_params.stop,
                    skip_special_tokens=sampling_params.skip_special_tokens,
                    seed=sampling_params.seed,
                )
            else:
                dynamic_params = sampling_params

            outputs.extend(engine.generate([prompt], sampling_params=dynamic_params))
    else:
        # Standard batched call.
        outputs = engine.generate(prompts, sampling_params=sampling_params)

    _dedupe_completions(outputs)
    _keep_first_final_answer(outputs)
    return outputs

def prevent_overlength(texts, tokenizer, max_model_len):
    """Tokenize each text and truncate so every result is strictly shorter
    than ``max_model_len`` tokens.

    See https://github.com/vllm-project/vllm/issues/10794

    Args:
        texts: list of input strings.
        tokenizer: tokenizer exposing ``tokenize`` and
            ``convert_tokens_to_string``.
        max_model_len: model context limit; outputs are capped at
            ``max_model_len - 1`` tokens.

    Returns:
        List of (possibly truncated) strings, one per input.
    """
    truncated = []
    for text in texts:
        tokens = tokenizer.tokenize(text)
        # Crop any sequence that would reach the model length limit.
        if len(tokens) >= max_model_len:
            tokens = tokens[:max_model_len - 1]
        truncated.append(tokenizer.convert_tokens_to_string(tokens))
    return truncated

def rm_generate(model: LLM, v_head, prompts, tokenizer, max_model_len, batch_size: int = 2000):
    """Score prompts with a reward model and squash scores into (-1, 1).

    Args:
        model: vLLM model used in embedding mode (``encode``).
        v_head: value head mapping a hidden state to a scalar reward.
        prompts: list of dicts, each with 'prefix' and 'text' string fields.
        tokenizer: tokenizer used to guard against over-length inputs.
        max_model_len: model context limit.
        batch_size: prompts scored per encode call (was hard-coded to 2000;
            now a parameter with the same default for backward compatibility).

    Returns:
        A list of Reward objects, one per prompt, in input order.
    """
    if not prompts:
        return []
    raw_rewards = []
    with torch.no_grad():
        for start in range(0, len(prompts), batch_size):
            batch = prompts[start:start + batch_size]
            inputs = [p['prefix'] + p['text'] for p in batch]
            for output in model.encode(prevent_overlength(inputs, tokenizer, max_model_len)):
                # Use the last token's hidden state as the sequence representation.
                last_hidden_states = output.outputs.data[-1]
                raw_rewards.append(v_head(last_hidden_states).cpu().item())
    # PPM uses 5 as the scale; tanh maps the scaled score into (-1, 1).
    scaled = torch.tanh(torch.tensor([r / 5 for r in raw_rewards])).tolist()
    return [Reward(value_estimate=r) for r in scaled]
