import grpc
import openai
from llm_base import LLMProviderBase
import llm_pb2
import llm_pb2_grpc
import logging
from utils.util import get_local_ip
from concurrent import futures

# Logging configuration: timestamped, named, leveled records for the server.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger("LLMdeepseekServer")


class LLMProvider(LLMProviderBase):
    """Singleton provider that streams chat completions from the DeepSeek
    (OpenAI-compatible) API.

    The singleton guarantees one shared OpenAI client and one shared
    session-response table per process, no matter how many times
    ``LLMProvider(config)`` is called.
    """

    _instance = None  # process-wide singleton instance

    def __new__(cls, config):
        # Lazily create the single instance. Python automatically invokes
        # __init__ on the returned instance after __new__, so the original
        # explicit __init__ call here was redundant and has been removed;
        # the `initialized` guard in __init__ is what prevents re-init.
        if cls._instance is None:
            cls._instance = super(LLMProvider, cls).__new__(cls)
        return cls._instance

    def __init__(self, config):
        # Runs on every LLMProvider(config) call because of the singleton
        # __new__; only the first call actually initializes state.
        if not hasattr(self, "initialized"):
            self.model_name = config.get("model_name", "deepseek-chat")
            self.api_key = config.get("api_key")
            self.base_url = config.get("base_url", "https://api.deepseek.com")  # default endpoint
            if not self.api_key or self.api_key.strip() == "":
                logger.error("你还没配置LLM的密钥，请在配置文件中配置密钥，否则无法正常工作")
            self.client = openai.OpenAI(api_key=self.api_key, base_url=self.base_url)
            self.session_responses = {}  # session_id -> in-flight streaming response
            self.initialized = True

    def response(self, session_id, request):
        """Yield ``llm_pb2.ChatResponse`` deltas for one chat request.

        Args:
            session_id: opaque key identifying the in-flight stream so a
                session is not started twice.
            request: gRPC request carrying ``messages``, each with ``role``
                and ``content`` attributes.

        Yields:
            llm_pb2.ChatResponse: the next non-empty content delta.
        """
        try:
            if session_id not in self.session_responses:
                self.session_responses[session_id] = self.client.chat.completions.create(
                    model=self.model_name,
                    messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
                    stream=True,
                )
            stream = self.session_responses[session_id]
            for chunk in stream:
                try:
                    # Skip keep-alive chunks that carry no choices or an
                    # empty delta instead of yielding blanks.
                    delta = chunk.choices[0].delta if chunk.choices else None
                    if not delta or not delta.content:
                        continue
                    yield llm_pb2.ChatResponse(delta=delta.content)
                except Exception as e:
                    # Per-chunk resilience: log the bad chunk and keep
                    # streaming rather than abort the whole response.
                    logger.error(f"Error in chunk processing: {e}")
        except Exception as e:
            # Boundary catch: log and end the stream instead of crashing
            # the gRPC worker thread.
            logger.error(f"Error in response generation: {e}")
        finally:
            # Always drop the finished/failed stream so the session can retry.
            self.session_responses.pop(session_id, None)
                
 
class ChatService(llm_pb2_grpc.ChatServiceServicer):
    """gRPC servicer that forwards chat requests to the shared LLMProvider."""

    def __init__(self, config):
        # LLMProvider is a singleton, so repeated construction reuses the
        # same underlying client and session table.
        self.llm_provider = LLMProvider(config)

    def ChatStream(self, request, context):
        """Return a generator of ChatResponse deltas for this request's session."""
        return self.llm_provider.response(request.session_id, request)


def serve(config):
    """Start the gRPC chat server and block until it terminates.

    Args:
        config: dict with ``model_name`` / ``api_key`` / ``base_url``,
            passed through to ChatService -> LLMProvider.
    """
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    llm_pb2_grpc.add_ChatServiceServicer_to_server(ChatService(config=config), server)
    # Listen on all interfaces (IPv4 + IPv6). Keep the numeric port separate
    # from the bind string: the old code stored '[::]:50051' in `port` and
    # then printed "{local_ip}:{port}", producing the garbled
    # "ip:[::]:50051" in the startup message.
    port = 50051
    server.add_insecure_port(f'[::]:{port}')
    local_ip = get_local_ip()
    # Announce where the server is reachable.
    print(f"gRPC 服务器已启动，正在监听地址 {local_ip}:{port}")
    print("gRPC 服务器正在运行...")
    server.start()
    # Block the calling thread until the server shuts down.
    server.wait_for_termination()


if __name__ == '__main__':
    import os

    # SECURITY: an API key was previously hard-coded here. Any secret
    # committed to source control must be treated as leaked and rotated.
    # Read the key from the environment instead; LLMProvider logs a clear
    # error if it is missing.
    config = {
        "model_name": "deepseek-chat",
        "api_key": os.environ.get("DEEPSEEK_API_KEY", ""),
        "base_url": "https://api.deepseek.com"
    }
    serve(config)

# NOTE(review): everything below is dead, commented-out legacy code for the
# ZhipuAI/OpenAI variant of this server, and it contains another hard-coded
# API key (also to be treated as leaked). It should be deleted and recovered
# from version-control history if ever needed.
# import grpc
# import openai
# from llm_base import LLMProviderBase
# import llm_pb2
# import llm_pb2_grpc
# import logging
# from utils.util import get_local_ip

# from concurrent import futures
# logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# logger = logging.getLogger("LLMOpenAIServer")


# class LLMProvider(LLMProviderBase):
#     _instance = None

#     def __new__(cls, config):
#         if cls._instance is None:
#             cls._instance = super(LLMProvider, cls).__new__(cls)
#             cls._instance.__init__(config)
#         return cls._instance

#     def __init__(self, config):
#         if not hasattr(self, "initialized"):
#             self.model_name = config.get("model_name")
#             self.api_key = config.get("api_key")
#             self.base_url = config.get("base_url", "https://api.zhipuai.cn/v1")  # 默认值
#             if not self.api_key or self.api_key.strip() == "":
#                 logger.error("你还没配置LLM的密钥，请在配置文件中配置密钥，否则无法正常工作")
#             self.client =openai.OpenAI(api_key=self.api_key, base_url=self.base_url)
#             self.session_responses = {}  # 用于存储每个 session_id 的响应
#             self.initialized = True

#     def response(self, request, session_id):
#         try:
#             if session_id not in self.session_responses:
#                 self.session_responses[session_id] = self.client.chat.completions.create(
#                     model=self.model_name,
#                     messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
#                     stream=True,
#                 )
#             responses = self.session_responses[session_id]
#             is_active = True
#             for chunk in responses:
#                 try:
#                     # 检查是否存在有效的choice且content不为空
#                     delta = chunk.choices[0].delta if getattr(chunk, 'choices', None) else None
#                     content = delta.content if hasattr(delta, 'content') else ''
#                 except IndexError:
#                     content = ''
#                 if content:
#                     # 处理标签跨多个chunk的情况
#                     if '<think>' in content:
#                         is_active = False
#                         content = content.split('<think>')[0]
#                     if '</think>' in content:
#                         is_active = True
#                         content = content.split('</think>')[-1]
#                     if is_active:
#                         yield llm_pb2.ChatResponse(delta=content)

#         except Exception as e:
#             logger.error(f"Error in response generation: {e}")
#             # yield f"Error in response generation: {e}"
            

# class ChatService(llm_pb2_grpc.ChatServiceServicer):
#     def __init__(self, config):
#         self.llm_provider = LLMProvider(config)

#     def ChatStream(self, request, context):
#         session_id = request.session_id  # 获取请求中的 session_id
#         return self.llm_provider.response(request, session_id)


# def serve(config):

#     server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#     llm_pb2_grpc.add_ChatServiceServicer_to_server(ChatService(config=config), server)
#     # 绑定服务器到端口
#     port = '[::]:50051'  # 监听的端口
#     server.add_insecure_port(port)
#     # 打印服务器启动信息
#     local_ip = get_local_ip()
#     # 打印服务器运行状态
#     print(f"gRPC 服务器已启动，正在监听地址 {local_ip}:{port}")
#     print("gRPC 服务器正在运行...")
#     server.start()
#     # 等待服务器关闭 
#     server.wait_for_termination()

# if __name__ == '__main__':
#     config = {
#         "model_name": "glm-4-0520",
#         "api_key": "8a6c3e356cb389e8c58ccb65d625daff.siAbJuFBxjiaekYA",
#         "base_url": "https://api.zhipuai.cn/v1"
#     }
#     serve(config)
                