import logging

import dashscope
from langchain_core.messages import HumanMessage
from langchain_ollama import OllamaLLM, OllamaEmbeddings

from model.base_model import BaseModel




class DeepSeekR1Prompt(BaseModel):
    """Chat + embedding model backed by a local Ollama ``deepseek-r1:7b``.

    Chat responses are streamed from the local model; embeddings also come
    from the local model, with DashScope's ``text-embedding-v2`` service as
    a remote fallback when the local embedding call fails.
    """

    def __init__(self):
        # The same local Ollama model serves both chat and embeddings.
        self.chatLLM = OllamaLLM(model="deepseek-r1:7b")
        self.embed_model = OllamaEmbeddings(model="deepseek-r1:7b")

    def answer(self, text):
        """Stream the model's reply to *text*.

        Returns the iterator produced by ``OllamaLLM.stream``, yielding
        response chunks as they arrive.
        """
        return self.chatLLM.stream([HumanMessage(content=text)])

    def query_vertory(self, text):
        """Return an embedding vector for *text*.

        Tries the local Ollama embedding model first. On any failure the
        error is logged and the DashScope ``text-embedding-v2`` service is
        used instead. Returns the embedding (list of floats) on success,
        ``[]`` when the local model yields no embeddings, or the raw
        DashScope response object if its shape is unexpected.
        """
        try:
            embeddings = self.embed_model.embed_documents([text])
            return embeddings[0] if embeddings else []
        except Exception:
            # Local embedding failed — record the cause (previously swallowed
            # silently) before falling back to the remote service.
            logging.getLogger(__name__).exception(
                "Local Ollama embedding failed; falling back to DashScope")
            resp = dashscope.TextEmbedding.call(
                    model=dashscope.TextEmbedding.Models.text_embedding_v2,
                    input=text)
            if "output" in resp and "embeddings" in resp["output"]:
                return resp["output"]["embeddings"][0]["embedding"]
            return resp

    # Implements the base-class hook: expose the underlying LLM object.
    def get_llm_model(self):
        return self.chatLLM

