import grpc
# import 
from utils.util import get_local_ip

from concurrent import futures
import llm_pb2
import llm_pb2_grpc
from zhipuai import ZhipuAI
from llm_base import LLMProviderBase

# from config.logger import setup_logging
import logging
# 配置日志
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger("LLMzhipuappServer")

# TAG = __name__
# logger = setup_logging()

class LLMProvider(LLMProviderBase):
    """Singleton provider that streams chat completions from the ZhipuAI API.

    The class is a process-wide singleton: every ``LLMProvider(config)`` call
    returns the same instance, and only the first call's config is used.
    """
    _instance = None

    def __new__(cls, config):
        # Singleton creation. NOTE: Python automatically calls __init__ on the
        # object returned by __new__, so we must not invoke it by hand here —
        # the original did, causing a redundant double-initialisation that was
        # only masked by the `initialized` guard in __init__.
        if cls._instance is None:
            cls._instance = super(LLMProvider, cls).__new__(cls)
        return cls._instance

    def __init__(self, config):
        # __init__ runs on every LLMProvider(config) call because of the
        # singleton __new__; the guard ensures we only initialise once.
        if not hasattr(self, "initialized"):
            self.model_name = config.get("model_name")
            self.api_key = config.get("api_key")
            self.base_url = config.get("base_url", "https://api.zhipuai.cn/v1")  # default endpoint
            if not self.api_key or self.api_key.strip() == "":
                logger.error("你还没配置LLM的密钥，请在配置文件中配置密钥，否则无法正常工作")
            self.client = ZhipuAI(api_key=self.api_key)
            # Retained for interface compatibility; no longer used as a cache
            # (see BUG FIX note in response()).
            self.session_responses = {}
            self.initialized = True

    def response(self, session_id, request):
        """Stream completion deltas for one chat request.

        Args:
            session_id: opaque identifier of the client session (log context).
            request: gRPC ChatRequest carrying `.messages` with role/content.

        Yields:
            llm_pb2.ChatResponse with the incremental `delta` text of each chunk.
        """
        try:
            # BUG FIX: the original cached the streaming response object per
            # session_id. A stream is a one-shot iterator, so a second request
            # with the same session_id iterated an exhausted stream and
            # yielded nothing. Create a fresh stream for every request.
            stream = self.client.chat.completions.create(
                model=self.model_name,
                messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
                stream=True,
            )
            for chunk in stream:
                choice = chunk.choices[0]
                if hasattr(choice, 'delta') and hasattr(choice.delta, 'content'):
                    delta = choice.delta.content
                    # Some chunks carry delta.content = None (e.g. the final
                    # chunk); protobuf string fields reject None, so skip them.
                    if delta:
                        yield llm_pb2.ChatResponse(delta=delta)
        except Exception as e:
            # Boundary handler: a generator raised into gRPC would abort the
            # stream; log and end the stream gracefully instead.
            logger.error(f"Error in response generation for session {session_id}: {e}")

class ChatService(llm_pb2_grpc.ChatServiceServicer):
    """gRPC servicer that delegates streaming chat to the shared LLMProvider."""

    def __init__(self, config):
        # LLMProvider is a singleton, so this reuses any existing instance.
        self.llm_provider = LLMProvider(config)

    def ChatStream(self, request, context):
        """Server-streaming RPC: forward the request to the provider generator."""
        return self.llm_provider.response(request.session_id, request)



def serve(config):
    """Start a blocking gRPC server exposing ChatService on port 50051.

    Args:
        config: dict forwarded to ChatService/LLMProvider; expects
            "model_name", "api_key" and optional "base_url".
    """
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    llm_pb2_grpc.add_ChatServiceServicer_to_server(ChatService(config=config), server)
    port = 50051
    # '[::]' binds all IPv4/IPv6 interfaces.
    server.add_insecure_port(f'[::]:{port}')
    # Start before announcing, so the "started" message is accurate.
    server.start()
    local_ip = get_local_ip()
    # BUG FIX: the original interpolated port='[::]:50051' into
    # f"{local_ip}:{port}", printing a garbled address such as
    # "1.2.3.4:[::]:50051"; print a clean host:port instead.
    print(f"gRPC 服务器已启动，正在监听地址 {local_ip}:{port}")
    print("gRPC 服务器正在运行...")
    # Block the main thread until the server is terminated.
    server.wait_for_termination()

if __name__ == '__main__':
    import os
    # SECURITY: an API key was hard-coded here. Prefer the ZHIPUAI_API_KEY
    # environment variable; the original literal is kept only as a fallback
    # so behaviour is unchanged for existing deployments.
    # TODO: rotate this key and delete the fallback — it is exposed in VCS.
    config = {
        "model_name": "glm-4-0520",
        "api_key": os.environ.get(
            "ZHIPUAI_API_KEY",
            "8a6c3e356cb389e8c58ccb65d625daff.siAbJuFBxjiaekYA",
        ),
        "base_url": "https://api.zhipuai.cn/v1",
    }
    serve(config)
    
    
# class ChatService(llm_pb2_grpc.ChatServiceServicer):
#     def ChatStream(self, request, context):
#         client = ZhipuAI(api_key="8a6c3e356cb389e8c58ccb65d625daff.siAbJuFBxjiaekYA")  # 请填写您自己的APIKey
#         response = client.chat.completions.create(
#             model=request.model,
#             messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
#             stream=True,
#         )

#         for chunk in response:
#             # yield chat_pb2.ChatResponse(delta=chunk.choices[0].delta)
#             if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
#                 delta = chunk.choices[0].delta.content
#                 yield llm_pb2.ChatResponse(delta=delta)
