# Qwen client implementation (uses the OpenAI-compatible chat completions API)
from fastapi.responses import StreamingResponse
from openai import OpenAI
from aiClients.baseAiClient import BaseAIClient, ModelInfo

class QwenAiClient(BaseAIClient):
    """Qwen chat-completion client built on the OpenAI-compatible SDK.

    All connection and generation settings (API key, base URL, model name,
    max_tokens, temperature, streaming flag) come from the ``ModelInfo``
    passed at construction time.
    """

    def __init__(self, model_info: ModelInfo):
        """Create the underlying OpenAI client from *model_info*.

        Args:
            model_info: Carries ``key`` (API key), ``base_url`` (endpoint),
                plus the generation parameters used later by :meth:`query`.
        """
        self.client = OpenAI(
            api_key=model_info.key,
            base_url=model_info.base_url,
        )
        self.model_info = model_info

    def query(self, messages, **kwargs):
        """Send *messages* to the configured model and return the raw response.

        When ``model_info.streaming`` is true the SDK returns an iterable of
        chunks; otherwise a single completion object. Either way the raw
        return value is handed back to the caller to consume. Extra keyword
        arguments are forwarded to ``chat.completions.create`` unchanged.

        Args:
            messages: Chat history in OpenAI message-dict format
                (assumed ``[{"role": ..., "content": ...}, ...]`` — the
                shape is not validated here).
            **kwargs: Additional options passed through to the SDK call.

        Returns:
            The SDK response: a completion object, or a chunk stream when
            streaming is enabled.
        """
        # NOTE(review): removed an unreachable debug loop that followed the
        # return statement, and the commented-out StreamingResponse wrapper;
        # behavior of the reachable code is unchanged.
        return self.client.chat.completions.create(
            messages=messages,
            model=self.model_info.model,
            max_tokens=self.model_info.max_tokens,
            temperature=self.model_info.temperature,
            stream=self.model_info.streaming,
            **kwargs,
        )
    




# # Qwen 实现
# from ai.baseAIClient import BaseAIClient


# class QwenClient(BaseAIClient):
#     def __init__(self, api_key: str, model_version: str):
#         self.client = QwenClientSDK(api_key, model_version)

#     async def chat_completion(self, messages, **kwargs):
#         # 调用达摩院 API 的具体实现
#         return await self.client.async_chat(messages, **kwargs)