from concurrent import futures
import sys
import grpc
from langchain_core.messages import HumanMessage

from app.aichat.internal.svc import svc
from app.aichat.pb import aichat_pb2, aichat_pb2_grpc
from common.utils import decorator, tracing

class AiChater(aichat_pb2_grpc.aichatServicer):
    """gRPC servicer that streams LLM chat replies back in buffered chunks."""

    # How many model output pieces to accumulate before emitting one AiReply.
    _CHUNKS_PER_REPLY = 16

    def __init__(self, svcCtx: svc.ServiceContext):
        super().__init__()
        self.svcCtx = svcCtx
        self.tracing_util = tracing.TracingUtil()

    @decorator.my_grpc_handler(aichat_pb2.AiReply)
    def ChatStream(self, request, context):
        # Collect trace information into request metadata.
        # NOTE(review): `metadata` is filled here but never attached to an
        # outgoing call or response — confirm whether this is intentional.
        metadata = {}
        self.tracing_util.inject_trace_metadata(metadata)

        pending = []
        prompt = [HumanMessage(content=request.content)]

        # Stream model output, batching pieces so clients receive fewer,
        # larger chunks instead of one message per token.
        for piece in self.svcCtx.llm_model.stream(prompt):
            pending.append(piece.content)
            if len(pending) >= self._CHUNKS_PER_REPLY:
                yield aichat_pb2.AiReply(chunk="".join(pending))
                pending = []

        # Flush whatever is left in a final, possibly short, reply.
        if pending:
            yield aichat_pb2.AiReply(chunk="".join(pending))

def serve(svcCtx: svc.ServiceContext):
    """Start the aichat gRPC server and block until it terminates.

    Args:
        svcCtx: service context supplying the worker-pool size
            (``config.max_workers``) and listen address (``config.listen_on``).
    """
    pool = futures.ThreadPoolExecutor(max_workers=svcCtx.config.max_workers)
    rpc_server = grpc.server(pool)
    aichat_pb2_grpc.add_aichatServicer_to_server(AiChater(svcCtx), rpc_server)
    rpc_server.add_insecure_port(svcCtx.config.listen_on)
    rpc_server.start()
    # Announce the listen address on stderr, then block forever.
    print(f"Starting rpc server at {svcCtx.config.listen_on}...\n", file=sys.stderr)
    rpc_server.wait_for_termination()