import openai
from typing import Iterator, Optional
import base64

class LLM:
    """Thin client for an OpenAI-compatible chat-completions endpoint."""

    def __init__(self, base_url: Optional[str] = None, api_key: Optional[str] = None, modelname: str = "Qwen2.5-7B"):
        """
        Args:
            base_url: Service endpoint; falls back to a local server when omitted.
            api_key: API key; local servers typically accept any non-empty string.
            modelname: Model identifier sent with every request.
        """
        self.client = openai.OpenAI(
            base_url=base_url or "http://127.0.0.1:10528/v1",
            api_key=api_key or "anykey",
        )
        self.modelname = modelname

    def generate_response(self, user_input: str, stream: bool = False) -> str | Iterator[str]:
        """Single-turn chat completion.

        Args:
            user_input: The user's message.
            stream: When True, return an iterator of text deltas instead of
                the full reply.

        Returns:
            The assistant reply as a string, or an iterator of string chunks
            when ``stream`` is True.
        """
        # A single turn is just a two-message conversation; delegate to the
        # multi-turn path so the request/stream handling lives in one place.
        messages = [
            {"role": "system", "content": ''},
            {"role": "user", "content": user_input},
        ]
        return self.generate_multiturn_response(messages, stream=stream)

    def generate_multiturn_response(self, messages: list, stream: bool = False) -> str | Iterator[str]:
        """Chat completion over an explicit message history.

        Args:
            messages: Full conversation as a list of role/content dicts.
            stream: When True, return an iterator of text deltas instead of
                the full reply.

        Returns:
            The assistant reply as a string, or an iterator of string chunks
            when ``stream`` is True.
        """
        response = self.client.chat.completions.create(
            messages=messages,
            model=self.modelname,
            stream=stream,
        )
        if stream:
            # Role-only / empty deltas are mapped to "" so callers can
            # concatenate chunks without None checks.
            return (chunk.choices[0].delta.content or "" for chunk in response)
        return response.choices[0].message.content


class VLM:
    def __init__(self, base_url: Optional[str] = None, api_key: Optional[str] = None, modelname: str = "Qwen2.5-VL-7B-Instruct"):
        # 设置默认服务地址和API密钥
        self.base_url = base_url if base_url else "http://127.0.0.1:10528/v1"
        self.api_key = api_key if api_key else "test"
        self.client = openai.OpenAI(base_url=self.base_url, api_key=self.api_key)
        self.modelname = modelname

    def _encode_local_image(self, image_path: str) -> str:
        """将本地图片转换为Base64编码"""
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode('utf-8')

    def generate_response(self, user_input: str, image_input: str, is_local_image: bool = True, stream: bool = False) -> str | Iterator[str]:
        """
        生成多模态响应
        :param user_input: 文本输入
        :param image_input: 图片URL或本地路径（需配合is_local_image参数）
        :param is_local_image: 是否为本地图片（默认True）
        :param stream: 是否启用流式返回（默认False）
        """
        try:
            # 构建消息内容
            messages = [
                {"role": "system", "content": "你是一个视觉语言助手，可以理解图片内容并回答相关问题。"},
                {"role": "user", "content": []}
            ]
            
            # 处理图片输入
            if is_local_image:
                # 本地图片转换为Base64格式
                base64_image = self._encode_local_image(image_input)
                image_content = {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{base64_image}"
                    }
                }
            else:
                # 直接使用URL
                image_content = {
                    "type": "image_url",
                    "image_url": {
                        "url": image_input
                    }
                }
            
            # 添加文本和图片内容
            messages[1]["content"].extend([
                {"type": "text", "text": user_input},
                image_content
            ])

            # 调用API生成响应
            response = self.client.chat.completions.create(
                model=self.modelname,
                messages=messages,
                max_tokens=1024,
                stream=stream,
            )
            
            if stream:
                return (chunk.choices[0].delta.content or "" for chunk in response)
            else:
                return response.choices[0].message.content

        except Exception as e:
            print(f"生成响应失败: {str(e)}")
            if stream:
                return iter(())  # 返回空迭代器
            else:
                return None


class EmbeddingModel:
    """用于生成文本嵌入向量的模型类"""
    
    def __init__(self, base_url: str = "http://localhost:10526/v1", api_key: str = "123456", modelname: str = "bge-m3"):
        """
        初始化EmbeddingModel类
        
        Args:
            base_url: 服务地址
            api_key: API密钥
            modelname: 模型名称
        """
        self.client = openai.OpenAI(
            base_url=base_url,
            api_key=api_key,
        )
        self.modelname = modelname
    
    def get_embeddings(self, texts: str | list[str]) -> list[list[float]]:
        """
        生成一个或多个文本的嵌入向量
        
        Args:
            texts: 可以是单个文本字符串或文本列表
        
        Returns:
            嵌入向量列表，每个向量表示为浮点数列表
        """
        # 如果输入是单个文本，转换为列表
        if isinstance(texts, str):
            texts = [texts]
        
        try:
            response = self.client.embeddings.create(
                input=texts,
                model=self.modelname,
            )
            
            # 提取嵌入向量并返回
            return [data.embedding for data in response.data]
        except Exception as e:
            print(f"生成嵌入向量失败: {str(e)}")
            # 返回与输入文本数量相同的None列表
            return [None] * len(texts)
    
    def get_embedding_dimension(self, text: Optional[str] = None) -> Optional[int]:
        """
        获取嵌入向量的维度
        
        Args:
            text: 可选文本，用于测试嵌入向量维度。如果未提供，则使用默认测试文本
        
        Returns:
            嵌入向量的维度，如果出错则返回None
        """
        if text is None:
            text = "测试文本"
        
        embeddings = self.get_embeddings(text)
        if embeddings and embeddings[0] is not None:
            return len(embeddings[0])
        return None


if __name__ == "__main__":
    # --- LLM: single-turn, non-streaming ---
    question = "你是谁"
    chat_model = LLM("http://127.0.0.1:10425/v1", modelname="Qwen2.5-7B-Instruct")
    answer = chat_model.generate_response(question)
    print("LLM非流式结果:")
    print(answer)

    # --- LLM: single-turn, streaming ---
    print("\nLLM流式结果:")
    token_stream = chat_model.generate_response(question, stream=True)
    if token_stream:
        for piece in token_stream:
            print(piece, end='', flush=True)
        print()

    # --- VLM: single-turn with a local image, non-streaming ---
    vision_model = VLM("http://127.0.0.1:10528/v1", modelname="Qwen2.5-VL-7B-Instruct")
    vision_question = "图片中的公式是什么?"
    image_path = "/home/LB14787_linux/jinke/ekrVectorAI/qkv.png"
    vision_answer = vision_model.generate_response(vision_question, image_path, is_local_image=True)
    print("\nVLM非流式结果:")
    print(vision_answer)

    # --- VLM: same request, streaming ---
    print("\nVLM流式结果:")
    vision_stream = vision_model.generate_response(vision_question, image_path, is_local_image=True, stream=True)
    if vision_stream:
        for piece in vision_stream:
            print(piece, end='', flush=True)
        print()

    # --- Embedding: batch of two texts ---
    embedder = EmbeddingModel("http://localhost:10526/v1", modelname="bge-m3")
    sample_texts = ["这是一个测试文本", "这是另一个测试文本"]
    vectors = embedder.get_embeddings(sample_texts)
    print("\n文本嵌入向量:")
    print(vectors)