# ollama 相关定义
import os, time, json
from langchain_ollama import OllamaEmbeddings, OllamaLLM
from langchain_postgres import PGVector
from langchain_community.document_loaders import PyPDFLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain.prompts import ChatPromptTemplate,HumanMessagePromptTemplate

from flask import Flask, request, jsonify, render_template, Response

class OllamaService:
    """Wraps an Ollama LLM and embedding model with a PGVector store.

    Responsibilities:
      * build the embedding / LLM clients against a configurable Ollama host,
      * ingest PDF or plain-text files into a PGVector collection,
      * expose a simple prompt -> LLM -> string QA chain (``qa_chain``).
    """

    # Default Postgres DSN; override with the PG_CONNECTION env var so
    # credentials do not have to live in source code.
    DEFAULT_CONNECTION = "postgresql+psycopg://langchain:langchain@192.168.137.2:5432/langchain"

    def __init__(self):
        # Ollama server address, read from the environment with a local default.
        self.OLLAMA_HOST = os.getenv('OLLAMA_HOST', 'http://127.0.0.1:11434')
        self.embeddings = self.init_embeddings()
        self.llm = self.init_llm()
        # NOTE(review): DB credentials belong in config/secrets management;
        # PG_CONNECTION lets deployments override without editing code.
        self.connection = os.getenv('PG_CONNECTION', self.DEFAULT_CONNECTION)
        self.vector_store = self.init_vector_store()
        self.qa_chain = self.initQAChain()

    def init_embeddings(self):
        """Return the embedding-model client pointed at the Ollama server."""
        return OllamaEmbeddings(
            model='mxbai-embed-large',
            base_url=self.OLLAMA_HOST,
        )

    def init_llm(self):
        """Return the LLM client (streaming enabled, low temperature)."""
        return OllamaLLM(
            model='modelscope2ollama-registry.azurewebsites.net/unsloth/DeepSeek-R1-Distill-Qwen-1.5B-GGUF',
            temperature=0.3,
            base_url=self.OLLAMA_HOST,
            stream=True,  # stream tokens as they are generated
        )

    def init_vector_store(self):
        """Connect to the PGVector collection used as the knowledge base."""
        return PGVector(
            connection=self.connection,
            embeddings=self.embeddings,
            collection_name="my_docs",
            use_jsonb=True,
        )

    def add_documents(self, filepath):
        """Load *filepath* (PDF or plain text), split it into overlapping
        chunks and add them to the vector store.

        Returns silently when *filepath* is falsy. Any extension other than
        ``.pdf`` (case-insensitive) is treated as plain text.
        """
        if not filepath:
            return
        # Case-insensitive extension check so 'FOO.PDF' is loaded as a PDF too.
        loader = PyPDFLoader(filepath) if filepath.lower().endswith('.pdf') else TextLoader(filepath)
        documents = loader.load()

        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1024,
            chunk_overlap=200,
            length_function=len,
        )
        chunks = text_splitter.split_documents(documents)
        if self.vector_store:
            self.vector_store.add_documents(chunks)
            print(f'文档已追加: {filepath}')
        else:
            # Fallback: create a fresh collection from this first document set.
            self.vector_store = PGVector.from_documents(
                documents=chunks,
                embedding=self.embeddings,
                collection_name="my_docs",
                connection=self.connection,
            )
            print(f'新建知识库成功: {filepath}')

    def initQAChain(self):
        """Assemble the QA chain: prompt -> LLM -> plain-string output.

        NOTE(review): this chain does NOT consult the vector store. A
        retrieval-augmented variant would pipe
        ``self.vector_store.as_retriever(...)`` (joined doc contents) into a
        ``{context}`` slot of the prompt alongside ``{question}``.
        """
        human_template = "中文回答我的问题: {question} 。"
        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
        prompt = ChatPromptTemplate.from_messages([human_message_prompt])

        return (
            prompt
            | self.llm
            | StrOutputParser()
        )