from app.rag.internal.config import config
from common.model.vecmodel_paper_segment import PaperSegmentModel
from common.rpc import stub
from common.rpc.assistant import assistant_pb2_grpc
from common.rpc.chat import chat_pb2_grpc
from common.rpc.paperfile import paperfile_pb2_grpc
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_openai import ChatOpenAI
from pymilvus import MilvusClient

class ServiceContext:
    """Shared service dependencies: vector store model, embeddings, and RPC stubs."""

    def __init__(self, c: config.Config):
        """Wire up all downstream clients from the application config.

        Args:
            c: application configuration carrying connection settings for
               Milvus, the DashScope embedding service, and the gRPC peers.
        """
        self.config = c

        # Paper-segment vector model backed by a Milvus connection.
        self.paper_segment_model = PaperSegmentModel(
            MilvusClient(uri=c.milvus_conf.uri)
        )

        # Embedding client (DashScope).
        self.embeddings = DashScopeEmbeddings(
            model=c.ali_embeddings_conf.model,
            dashscope_api_key=c.ali_embeddings_conf.api_key,
        )

        # gRPC stubs for the sibling services.
        self.assistant_stub: assistant_pb2_grpc.AssistantStub = stub.new_rpc_stub(
            c.assistant_rpc_conf, assistant_pb2_grpc.AssistantStub
        )
        self.chat_msg_stub: chat_pb2_grpc.ChatMsgServiceStub = stub.new_rpc_stub(
            c.chat_rpc_conf, chat_pb2_grpc.ChatMsgServiceStub
        )
        self.chat_dia_stub: chat_pb2_grpc.ChatDialogServiceStub = stub.new_rpc_stub(
            c.chat_rpc_conf, chat_pb2_grpc.ChatDialogServiceStub
        )
        self.paperfile_stub: paperfile_pb2_grpc.PaperFileStub = stub.new_rpc_stub(
            c.paperfile_rpc_conf, paperfile_pb2_grpc.PaperFileStub
        )

    def new_llm_model(self) -> ChatOpenAI:
        """Create a fresh ChatOpenAI client for the configured Ali LLM endpoint."""
        llm_conf = self.config.ali_llm_conf
        return ChatOpenAI(
            api_key=llm_conf.api_key,
            base_url=llm_conf.base_url,
            model=llm_conf.model,
            # other params...
            # "enable_thinking" is passed through verbatim to the endpoint;
            # presumably toggles the model's reasoning mode — confirm with API docs.
            extra_body={"enable_thinking": False},
        )