from langchain_core.prompts import ChatPromptTemplate
from langchain_ollama.llms import OllamaLLM

class LLMClient:
    def __init__(self, model_name="qwen:14b", base_url=None):
        print(f"Using model: {model_name}")
        # base_url=None lets OllamaLLM fall back to its default endpoint (http://localhost:11434)
        self.model = OllamaLLM(model=model_name, base_url=base_url)

    def summarize(self, text):
        """Ask the model to analyze the given text and generate a summary report."""
        prompt = ChatPromptTemplate.from_template(
            "Please analyze the following content and generate a summary report:\n\n{text}"
        )
        chain = prompt | self.model
        response = chain.invoke({"text": text})
        return response
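

# Minimal usage sketch: builds an LLMClient against a locally running Ollama
# server and summarizes a sample string. The model name "qwen:14b" and the
# default endpoint are assumptions; adjust them to whatever model and host
# you actually have available.
if __name__ == "__main__":
    client = LLMClient(model_name="qwen:14b")
    report = client.summarize(
        "Ollama serves local LLMs over an HTTP API; "
        "LangChain pipes a prompt template into the model."
    )
    print(report)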

