from __future__ import annotations
import random
from typing import ( Any,  Callable, Dict, Iterable,  List,Optional,Tuple,Type,Union,  TypedDict)
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.retrieval import create_retrieval_chain
from langchain.document_loaders import TextLoader, PyPDFLoader, Docx2txtLoader, UnstructuredXMLLoader
from langchain.document_loaders import UnstructuredExcelLoader, JSONLoader
import re, os,json
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import numpy as np
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.utils import xor_args
from langchain_core.vectorstores import VectorStore
from rank_bm25 import CalcBM25
from ApiModels import VectInfo,ToolInfo,apiModels

DEFAULT_K = 4  # Number of Documents to return.
DEFAULT_COUNT=100  # Batch size for bulk insert/update/delete SQL statements.
CN_FILTER=0.2  # Default similarity threshold used to filter vector-query hits.

from typing import Optional, Union, Sequence, Dict, Mapping, List

# Type aliases for Chroma-style result payloads.
ID = str
IDs = List[ID]
URI = str
URIs = List[URI]
Metadata = Mapping[str, Union[str, int, float, bool]]
class QueryResult(TypedDict):
    """Chroma-compatible batch query result: one inner list per query text."""
    ids: List[IDs]
    documents: Optional[List[List[Document]]]
    uris: Optional[List[List[URI]]]    
    metadatas: Optional[List[List[Metadata]]]
    distances: Optional[List[List[float]]]
    
from ApiTools  import apiBase

# 将数据按照不同的/领域/主题/用途等进行分类,创建collect名称
# function_name:0->从记忆搜索经验; 100->经验选择动作执行;200->对执行结果反思评估;300->从反思结果把经验保存到记忆
# 记忆搜索经验(100下)：0向量库QA（doc是没有答案）->2图数据->3任务分解->4webseek->5外部工具分解
# 经验选择动作执行(200下)：100数据库能力->101大模型nlp能力->102python能力->103外部工具
# 对执行结果反思评估(300下)：200大模型反思 ->201deepeval->202外部工具反思
# 经验保存到记忆(400下)： 300向量库->301图数据库；
# 每一步存储起来，最小的单元是 输入值+经验+llm=输出

class LlmDbVector(VectorStore):
    _LANGCHAIN_DEFAULT_COLLECTION_NAME = "LlmDbVector"

    def __init__(self,llm=None):
        """Create the store.

        Args:
            llm: optional chat-model instance used by the QA/RAG helper methods.
        """
        self.llm=llm
        # Answer produced by the last askDirs/dir_query_* run (None until found).
        self.dir_query_result=None
        # apiBase.run_python_file("ApiVctSvr",0,check=True)
        # Cache of tables already created in this process (table name -> 1).
        self.tabls={}
        # Default ranking algorithm used by query().
        self.vctname="VCT_ADJUST"
        # Optional per-query keyword filters, consumed positionally by query().
        self.wheres=[]
        # Supported algorithms: two SQL vector functions plus the BM25 family.
        self.list_vct=['VCT_ADJUST',"VCT_COSINE","BM25Okapi","BM25Adpt","BM25Plus","BM25T"]
    
    # Randomly assign topn/start/vctname to each VectInfo (exploration helper).
    def random_topn(self, vcts:list[VectInfo],maxtopn=8,maxstart=3):
        """Randomize paging window and ranking algorithm for every query spec."""
        for info in vcts:
            info['topn'] = random.randint(0, maxtopn)
            # get_random remaps even draws so offset 0 gets a better chance.
            info['start'] = apiBase.get_random(maxstart, 1)[0]
            choice = apiBase.get_random(len(self.list_vct), 1)[0]
            info['vctname'] = self.list_vct[choice]
        return vcts
        
    def get_collection_name(self,collection_name):
        """Resolve and cache the physical table name ("vct_" + md5 of the logical name)."""
        if collection_name:
            self._collection_name = "vct_" + apiBase.generate_md5(collection_name)
        return self._collection_name
            
    def create_collection(self,collection_name,create=False) -> str:
        """Ensure the collection's backing table exists; optionally create it.

        Args:
            collection_name: logical collection name (hashed into the table name).
            create: when False, only resolve/cache the name — no DB access.
        Returns:
            The resolved physical table name.

        BUG FIX: the CREATE TABLE statement declared the primary key twice
        (``id SERIAL PRIMARY KEY`` *and* a trailing ``PRIMARY KEY(id)``), which
        PostgreSQL rejects with "multiple primary keys for table"; the duplicate
        clause is removed. The return value is now consistently the table name.
        """
        self.get_collection_name(collection_name)
        # Already created in this process: nothing to do.
        if self._collection_name in self.tabls:
            return self._collection_name
        if not create:
            return self._collection_name
        # Ask the catalog whether the table already exists.
        sql=f"SELECT COUNT(*) as co FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'public'  AND TABLE_NAME = '{self._collection_name}'"
        exist = apiBase.query_json(sql)
        if int(exist[0]['co']) > 0:
            # Remember it so later calls skip the catalog round-trip.
            self.tabls[self._collection_name]=1
            return self._collection_name
        # fun_name encodes the experience stage: 0xx=search memory,
        # 1xx=select/execute action, 2xx=reflect on the result, 3xx=save to memory.
        # The smallest stored unit is: input + experience + llm = output.
        sql=[]
        sql.append(f"CREATE TABLE IF NOT EXISTS {self._collection_name}(id SERIAL PRIMARY KEY,fun_name CHARACTER VARYING(100), txt text, answer text default '',keys text default '',rft text default '', vect text ,meta text )")
        sql.append(f"COMMENT ON TABLE {self._collection_name} IS '{collection_name}'")
        sql.append(f"ALTER TABLE {self._collection_name} ADD CONSTRAINT {self._collection_name}_un UNIQUE (txt,fun_name)")        
        apiBase.execute(sql)
        self.tabls[self._collection_name]=1
        return self._collection_name

    @property
    def _collection(self):
        """Return the resolved physical table name (Chroma-compatibility shim)."""
        return self._collection_name
    
    def embedding_function(self, data: list) -> list:
        """Embed a batch of texts via the apiBase embedding service."""
        return apiBase.vector_embed(data)
    
    def get_embedding_str(self, data: list):
        """Embed each text and serialize every vector as a comma-separated string
        (e.g. "1.0,2.0,3.0"), ready to interpolate into SQL."""
        vectors = apiBase.vector_embed(data)
        return [','.join(str(component) for component in vector) for vector in vectors]
    
    @property
    def embeddings(self) -> Optional[Embeddings]:
        """No client-side Embeddings object exists; embedding is done via apiBase."""
        return None
        
    def query(
        self,
        query_texts: Optional[List[str]] = None,
        query_embeddings: Optional[List[List[float]]] = None,
        n_results: int = 6,
        where: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        defval:float =CN_FILTER,
    ) -> Union[List[Document], QueryResult]:
        """Run one similarity query per text/embedding and return a Chroma-style dict.

        BUG FIXES: ``self.apiModels`` raised AttributeError (``apiModels`` is a
        module-level import); the embeddings-only path crashed zipping against
        ``query_texts=None``; the BM25 re-rank indexed the embedding string
        (``vt['vctname']``) instead of using ``self.vctname``; and ``con`` could
        be unbound in ``finally`` if getconn itself failed.
        """
        if query_texts :
            vts=self.get_embedding_str(query_texts)
        else:
            vts=query_embeddings
            # Keep zip() working when callers supply only embeddings.
            query_texts=[""]*len(vts)
        ret={}
        ret["ids"]=[]
        ret["documents"]=[]
        ret["distances"]=[]
        ret["metadatas"]=[]

        size=len(self.wheres)
        for index,(vt,query) in enumerate(zip(vts,query_texts)):
            # Per-query keyword filter, if one was registered positionally.
            where = self.wheres[index] if index < size else None
            vctInfo = apiModels.get_vect(name="query",topn=n_results,vctname=self.vctname,where=where,threshold=defval)
            sql=self.getVectSql(vctInfo,vt)
            documents=[]
            ids=[]
            distances=[]
            metadatas=[]
            # Acquire before try so finally never sees an unbound connection.
            con=apiBase.pool.getconn()
            try:
                with con.cursor() as cur:
                    cur.execute(sql)
                    for id,txt,vct in cur.fetchall():
                        metadatas.append({})
                        documents.append(txt)
                        ids.append(id)
                        distances.append(float(vct))
            finally:
                apiBase.pool.putconn(con)

            if "BM25" in self.vctname:
                # Re-rank client-side with the configured BM25 variant.
                cal=CalcBM25(self.vctname,documents)
                cal.get_top_n(query,len(documents),defval)
                documents=[documents[i] for i in cal.topn]
                ids=[ids[i] for i in cal.topn]
                distances=cal.scores

            ret["ids"].append(ids)
            ret["documents"].append(documents)
            ret["distances"].append(distances)
            ret["metadatas"].append(metadatas)
        return ret
    
    def add_texts(
        self,
        texts,       
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        fun_name: str='qa',
        **kwargs: Any,
    ) -> List[str]:
        """Embed and insert ``(txt, answer, key)`` triples in batches.

        Args:
            texts: iterable of (txt, answer, key) triples.
            metadatas/ids: accepted for interface compatibility; ids is returned as-is.
            fun_name: experience-stage tag stored in the fun_name column.
        Returns:
            The ``ids`` argument unchanged.

        BUG FIX: the old batching flushed when the counter hit DEFAULT_COUNT but
        never added the row that triggered the flush, silently dropping one row
        per batch; rows are now appended first and flushed on batch size.
        """
        sql=f"insert into {self._collection_name}(txt,answer,keys,vect,fun_name)values(%s,%s,%s,%s,'{fun_name}') "
        sqls=[]
        params=[]
        for txt,answer,key in texts:
            vt1=self.get_embedding_str([txt])
            sqls.append(sql)
            params.append((txt,answer,key,vt1[0]))
            if len(sqls) >= DEFAULT_COUNT:
                apiBase.execute(sqls,params)
                sqls=[]
                params=[]
        if len(sqls) > 0:
            apiBase.execute(sqls,params,True)
        return ids

    def similarity_search(
        self,
        query: str,
        k: int = DEFAULT_K,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return the ``k`` documents most similar to *query*, scores discarded.

        Args:
            query: query text to search for.
            k: number of results to return (default 4).
            filter: optional metadata filter.
        Returns:
            Documents most similar to the query text.
        """
        scored = self.similarity_search_with_score(query, k, filter=filter, **kwargs)
        return [document for document, _score in scored]
    
    def _results_to_docs_and_scores(self,results: Any) -> List[Tuple[Document, float]]:
        """Convert the first batch of a query-result dict into (Document, score) pairs."""
        texts = results["documents"][0]
        metas = results["metadatas"][0]
        scores = results["distances"][0]
        # The metadata column is currently unused: every Document gets an empty dict.
        return [
            (Document(page_content=text, metadata={}), score)
            for text, _meta, score in zip(texts, metas, scores)
        ]

    def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = DEFAULT_K,
        filter: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to the given embedding vector.

        BUG FIX: ``query`` expects ``query_embeddings`` to be a *list of*
        embeddings and iterates it one query at a time; the bare vector is now
        wrapped in a single-element list.

        Returns:
            List of Documents most similar to the query vector.
        """
        results = self.query(
            query_embeddings=[embedding],
            n_results=k,
            where=filter,
            where_document=where_document,
            **kwargs,
        )
        return [doc for doc, _ in self._results_to_docs_and_scores(results)]
        

    def similarity_search_by_vector_with_relevance_scores(
        self,
        embedding: List[float],
        k: int = DEFAULT_K,
        filter: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return (document, score) pairs most similar to the embedding vector.

        BUG FIX: as in ``similarity_search_by_vector``, the single embedding is
        now wrapped in a list because ``query`` iterates ``query_embeddings``
        per query.

        Returns:
            List of (Document, score) pairs; lower score means more similar.
        """
        results = self.query(
            query_embeddings=[embedding],
            n_results=k,
            where=filter,
            where_document=where_document,
            **kwargs,
        )
        return self._results_to_docs_and_scores(results)

    def similarity_search_with_score(
        self,
        query: str,
        k: int = DEFAULT_K,
        filter: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Similarity search returning (document, distance) pairs.

        Returns:
            Documents most similar to *query* with a float distance each;
            lower score represents more similarity.
        """
        raw = self.query(
            query_texts=[query],
            n_results=k,
            where=filter,
            where_document=where_document,
            **kwargs,
        )
        return self._results_to_docs_and_scores(raw)

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = DEFAULT_K,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        # NOTE(review): ``query`` does not accept an ``include`` keyword (its
        # signature has no **kwargs), so this call appears to raise TypeError —
        # confirm and align with query()'s signature.
        results = self.query(
            query_embeddings=embedding,
            n_results=fetch_k,
            where=filter,
            where_document=where_document,
            include=["metadatas", "documents", "distances", "embeddings"],
            **kwargs,
        )
        # NOTE(review): query() never populates an "embeddings" key in its
        # result dict, so this lookup would KeyError even if the call above
        # succeeded — this MMR path looks unreachable as written.
        cal=CalcBM25("BM25Okapi",None)
        mmr_selected = cal.maximal_marginal_relevance(
            np.array(embedding, dtype=np.float32),
            results["embeddings"][0],
            k=k,
            lambda_mult=lambda_mult,
        )
        candidates = [doc for doc, _ in self._results_to_docs_and_scores(results)]
        selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected]
        return selected_results

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = DEFAULT_K,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        BUG FIX: the old code forwarded the raw query *string* where the
        embedding vector is expected, shifted ``None``/``k``/``fetch_k`` into
        the wrong positional slots and then passed ``lambda_mult`` twice
        (TypeError). The query is now embedded first and every argument is
        forwarded by keyword.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        embedding = self.generate_embedding(query)
        return self.max_marginal_relevance_search_by_vector(
            embedding,
            k=k,
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
            filter=filter,
            where_document=where_document,
        )
        
    def reset_collection(self, collection_name: Optional[str] = None) -> None:
        """Reset the collection by dropping its table and recreating it empty.

        Args:
            collection_name: logical collection name; required to recreate the
                table because the store only caches the hashed physical name.

        BUG FIX: the old body called the non-existent ``delete_collection``
        (AttributeError — the drop method is ``drop_collection``) and invoked
        ``create_collection`` without its required name argument.
        """
        if collection_name is None:
            raise ValueError("collection_name is required to recreate the collection")
        self.drop_collection(collection_name)
        self.create_collection(collection_name, create=True)

    def get(
        self,
        ids  = None,
        where= None,
        limit: Optional[int] = 8,
        offset: Optional[int] = 0,
        where_document= None,
        include: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """Fetch rows from the collection table.

        Returns:
            A dict with the keys ``"ids"``, ``"documents"`` and ``"distances"``
            (distances stay empty — the table scan carries no similarity score).

        BUG FIXES: the SQL contained a stray ``)`` after the table name, the
        statement was never executed before ``fetchall``, and each row was
        unpacked into three variables although only two columns were selected.
        """
        sql=f"select id,txt from {self._collection_name}"
        if limit:
            sql += f" LIMIT {limit} OFFSET {offset} "

        ret={}
        ret["ids"]=[]
        ret["documents"]=[]
        ret["distances"]=[]
        con=apiBase.pool.getconn()
        try:
            with con.cursor() as cur:
                cur.execute(sql)
                for id,txt in cur.fetchall():
                    ret["ids"].append(id)
                    ret["documents"].append(txt)
        finally:
            apiBase.pool.putconn(con)

        return ret
                
    def update_document(self, document_id: str, document: Document) -> None:
        """Update a single document by its ID.

        Args:
            document_id: ID of the document to update.
            document: replacement Document content.
        """
        single_id, single_doc = [document_id], [document]
        return self.update_documents(single_id, single_doc)

    # type: ignore
    def update_documents(self, ids: List[str], documents: List[Document]) -> None:
        """Re-embed each document and update its (txt, vect) row by id, in batches.

        BUG FIXES: removed a stray ``apiBase.execute([sql],[(data,vt1)])`` that
        referenced variables not yet defined (NameError on every call), and
        fixed the batching so the row that triggers a flush is no longer
        dropped.
        """
        sql=f"update {self._collection_name} set txt=%s ,vect=%s where  id=%s "
        sqls=[]
        params=[]
        for id,data in zip(ids,documents):
            # Skip blank ids rather than issuing a no-op update.
            if id.strip() == '':
                continue
            vt1=self.get_embedding_str([data])
            sqls.append(sql)
            params.append((data,vt1[0],id))
            if len(sqls) >= DEFAULT_COUNT:
                apiBase.execute(sqls,params)
                sqls=[]
                params=[]
        if len(sqls) > 0:
            apiBase.execute(sqls,params)

    def drop_collection(self,collection_name=None) -> None:
        """Drop the collection's backing table and clear cached state.

        BUG FIX: the dropped table is now also evicted from ``self.tabls``;
        previously the stale cache entry made ``create_collection`` skip
        re-creating a table that no longer existed.
        """
        self.get_collection_name(collection_name)
        sql=f"DROP TABLE  {self._collection_name}"
        apiBase.execute([sql])
        self.tabls.pop(self._collection_name, None)
        self._collection_name = None
        
    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
        """Delete rows by vector ID, in batches.

        Args:
            ids: list of ids to delete; ``None``/empty is a no-op.
            kwargs: additional keyword arguments (ignored).

        BUG FIXES: ``params.append((id))`` appended a bare string — ``(id)`` is
        not a tuple — so the driver received malformed parameters; the batch
        flush also dropped the id that triggered it; and iterating ``ids=None``
        raised TypeError.
        """
        if not ids:
            return
        sql=f"delete from {self._collection_name}  where id=%s"
        sqls=[]
        params=[]
        for id in ids:
            if id.strip() == '':
                continue
            sqls.append(sql)
            params.append((id,))
            if len(sqls) >= DEFAULT_COUNT:
                apiBase.execute(sqls,params)
                sqls=[]
                params=[]
        if len(sqls) > 0:
            apiBase.execute(sqls,params)
    
    @classmethod
    def from_texts(
        cls: Type[LlmDbVector],
        texts: List[str],
        embedding: Optional[Embeddings] = None,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        persist_directory: Optional[str] = None,
        collection_metadata: Optional[Dict] = None,
        **kwargs: Any,
    ) -> LlmDbVector:
        """Create a LlmDbVector vectorstore from raw documents.

        BUG FIX: ``__init__`` only accepts ``llm``, so forwarding
        ``collection_name``/``embedding_function``/... raised TypeError. The
        store is now constructed plainly, the collection is created explicitly,
        and plain strings are adapted to the ``(txt, answer, key)`` triples
        that ``add_texts`` unpacks.

        Returns:
            LlmDbVector: the populated vectorstore.
        """
        store = cls(llm=kwargs.get("llm"))
        store.create_collection(collection_name, create=True)
        triples = [t if isinstance(t, tuple) else (t, '', '') for t in texts]
        store.add_texts(texts=triples, metadatas=metadatas, ids=ids)
        return store
    
    # Query by collection name and answer; question={'q':'','a':'', 'score':0}
    def askQA(self,vectInfos:list[VectInfo],sys_prompt:str,question:str,fun_name="qa"):
        """Build a few-shot prompt from retrieved QA examples and ask the LLM."""
        messages = self.get_QAdoc(vectInfos, sys_prompt, question, fun_name=fun_name)
        chain = ChatPromptTemplate.from_messages(messages) | self.llm
        reply = chain.invoke({"input": question})
        return reply.content
    
    # 对结果进行验证，出现错误进行修复，修复不了，给出一个相关的分数question={'q':'','a':'', 'score':0}
    def fixQA(self,vectInfos:list[VectInfo],sys_prompt:str,question:str,api:ToolInfo):
        task={"a":"","score":0,"data":""}
        res=self.askQA(vectInfos,sys_prompt,question) 
        #print(f"res.content={res.content}")
        task["a"]=res
        if api is None:
            return task
        ret=api['fun'](res)
        if ret['code'] == 200:
            task["data"]=ret['data']
            task["score"]=1
            return task
        
        msg=ret['message']       
        fix_prompt=f'''{question}
## ai
{res}
## human
There are many errors in the above code.Here is the error messages:
{msg}
Think carefully and FIX these errors'''
        res=self.askQA(vectInfos,sys_prompt,fix_prompt)
        task["a"]=res
        ret=api['fun'](res)
        if ret['code'] == 200:
            task["data"]=ret['data']
            task["score"]=1
            return task
        # 如果修复不成功，res和task['q'] 评估相似度
        task["score"]=0
        return task
    
    def generate_embedding(self, data: str, **kwargs) -> List[float]:
        """Embed a single string, unwrapping the batch dimension when present."""
        vectors = self.embedding_function([data])
        return vectors[0] if len(vectors) == 1 else vectors
    
    # Ranking algorithms: VCT_COSINE / VCT_ADJUST run in SQL; the BM25 family
    # (BM25Okapi, BM25Adpt, BM25Plus, BM25T) is scored client-side, so its SQL
    # pre-filter falls back to cosine.
    def getVectSql(self,vctInfo:VectInfo,vt):
        """Build the similarity-ranking SQL for *vctInfo* and embedding string *vt*.

        BUG FIXES: the BM25 test was inverted — it injected BM25 names into the
        SQL (no such DB function) and silently downgraded VCT_ADJUST to cosine;
        the optional keyword filter was also appended without ``and``, which
        produced invalid SQL. Both match the intent shown in the commented-out
        SQL in ``collection_query``.
        """
        if "BM25" in vctInfo['vctname']:
            # BM25 scoring happens in Python; cosine serves as the DB pre-filter.
            vctname="VCT_COSINE"
        else:
            vctname=vctInfo['vctname']
        fun_name=vctInfo["fun_name"]
        where=vctInfo['where']
        # NOTE(review): values are interpolated directly into the SQL string —
        # an injection risk if any of them ever carries user input.
        sql=f"select txt ,answer,vct from (select txt ,answer, {vctname}(vect,'{vt}') as vct from {self._collection_name} where fun_name='{fun_name}' ) tab where  vct >={vctInfo['threshold']} "
        if where:
            sql += f" and txt like '{where}' "
        sql += f" limit {vctInfo['topn']} offset {vctInfo['start']} "
        return sql
    
    # Vector query; BM25 variants over-fetch and re-rank client-side.
    def collection_query(self, vctInfo:VectInfo, quest:str,fun_name:str):
        """Run one similarity query described by *vctInfo* for *quest*."""
        embedded = self.get_embedding_str([quest])[0]
        if not self._collection_name:
            self._collection_name = self.get_collection_name(vctInfo['name'])
        if fun_name:
            vctInfo["fun_name"] = fun_name

        if "BM25" not in vctInfo['vctname']:
            # Pure SQL ranking: the database already ordered and scored the rows.
            return apiBase.query_json(self.getVectSql(vctInfo, embedded))

        # Over-fetch so the BM25 re-ranker has extra candidates to score.
        vctInfo['topn'] = int(vctInfo['topn']) + 10
        rows = apiBase.query_json(self.getVectSql(vctInfo, embedded))
        corpus = [row['txt'] for row in rows]
        ranker = CalcBM25(vctInfo['vctname'], corpus)
        return ranker.get_top_n(quest, len(corpus), float(vctInfo['threshold']))
    
     # Look up rows whose stored ``keys`` LIKE-pattern matches the question.
    def collection_keys(self, vctInfo:VectInfo, quest:str,fun_name:str):
        """Query by the ``keys`` column: return rows whose pattern matches *quest*.

        Returns rows shaped like the vector query ('txt', 'answer', 'vct'),
        with a constant score of '1'.
        """
        if not self._collection_name:
            self._collection_name=self.get_collection_name(vctInfo['name'])
        if not fun_name:
            fun_name=vctInfo["fun_name"]
        # NOTE(review): quest/fun_name are interpolated straight into the SQL —
        # an injection risk if either can carry user input.
        sql=f""" select  txt,answer,'1' as vct  from 
( select txt,answer,keys from {self._collection_name} where fun_name='{fun_name}' and keys <>'' ) tmp
where  '{quest}' like keys limit {vctInfo['topn']} offset {vctInfo['start']} """
        txts = apiBase.query_json(sql)        
        return txts
    
    # Batch query. type=0: all strategies; type=1: key lookup only; type=2: vector only.
    def clts_query(self, vcts:list[VectInfo], usr_prompt:str,fun_name:str,type:int=0):
        """Query every collection in *vcts*, de-duplicating results by txt.

        NOTE(review): ``type`` shadows the builtin; renaming would change the
        keyword interface, so it is kept. Also, ``topn -= len(txts)`` subtracts
        the *cumulative* result count rather than only this collection's key
        hits — presumably intentional budgeting across collections, but confirm.
        """
        txts=[]
        dt=[]  # txt values already emitted, used for de-duplication
        for vct in vcts:
            self.create_collection(vct['name'])
            topn=int(vct['topn'])
            if fun_name is None:
                fun_name=vct["fun_name"]                
            # Strategy 1: lookup by stored key pattern.
            if type == 0 or  type == 1:
                tts= self.collection_keys(vct,usr_prompt,fun_name)
                # Drop duplicates across collections.
                for t in tts:                    
                    if t['txt'] not in dt :
                        dt.append(t['txt'])
                        txts.append(t)
                topn -= len(txts)
            if  topn <=0:
                break
            # Strategy 2: vector similarity with the remaining budget.
            if type == 0 or  type == 2:
                vct['topn']=topn
                tts= self.collection_query(vct,usr_prompt,fun_name)
                for t in tts:
                    if t['txt'] not in dt :
                        dt.append(t['txt'])
                        txts.append(t)
        dt=None
        return txts
    
    # 如果找不到相关的doc，参考rag_adpt，把复杂的问题分解成多个简单问题
    def quest_rewriter(self,question):
        system = """You are a question rewriter that can transform or decompose input questions into multiple better versions optimized for vector library retrieval. 
View input and attempt to infer potential semantic intent/meaning.Output in this format:
```
1.{{sub question 1}};
2.{{sub question 2}};
```"""
        re_write_prompt = ChatPromptTemplate.from_messages(
            [
                ("system", system),
                (
                    "human","Here is the initial question: \n\n {question} \n Formulate an improved question.",
                ),
            ]
        )
        question_rewriter = re_write_prompt | self.llm 
        respone=question_rewriter.invoke({"question": question})
        return respone.content
    
    # Build chat-history style few-shot messages from retrieved QA pairs; the
    # relevant chunk may be the 5th or 7th hit, not necessarily the first.
    def get_QAdoc_history(self,vectInfos:list[VectInfo],sys_prompt:str,question:str,fun_name:str):
        """Return prompt messages: system + retrieved human/ai turns + user input."""
        messages = [("system", sys_prompt)]
        for info in vectInfos:
            self.create_collection(info['name'])
            hits = self.clts_query([info], f'"question": "{question}"', fun_name)
            if len(hits) == 0:
                # No hits: rephrase the question (rag_adpt style) and retry once.
                question = self.quest_rewriter(question)
                hits = self.clts_query([info], f'"question": "{question}"', fun_name)
                if len(hits) == 0:
                    continue
            # Emit hits starting from `start` (which is not necessarily 0).
            for hit in hits:
                messages.append(("human", hit["txt"]))
                messages.append(("ai", hit["answer"]))
        messages.append(("human", "Answer questions based on historical chat:{input}"))
        messages.append(MessagesPlaceholder(variable_name="messages", optional=True))
        return messages
    
    def get_QAdoc(self,vectInfos:list[VectInfo],sys_prompt:str,question:str,fun_name='qa'):        
        """Build a few-shot prompt: system message + EXAMPLES block + question slot.

        BUG FIX: ``chat_history`` used to be (re)initialized inside the loop, so
        only the *last* collection's examples survived — and it was referenced
        after the loop even when every collection came back empty, raising
        NameError. It is now initialized once before the loop.
        """
        ls = [("system",  sys_prompt)]
        # Examples may contain both correct and incorrect samples.
        chat_history="### EXAMPLES\n"
        for vectInfo in vectInfos:
            self.create_collection(vectInfo['name'])
            docs=self.clts_query([vectInfo],f'"question": "{question}"',fun_name)
            if len(docs) == 0:
                # No hits: rephrase the question (rag_adpt style) and retry once.
                question=self.quest_rewriter(question)
                docs=self.clts_query([vectInfo],f'"question": "{question}"',fun_name)
                if len(docs) == 0:
                    continue
            for docu in docs:
                if docu["answer"]:
                    chat_history+=f'question:{docu["txt"]}\n'
                    chat_history+=f'answer:{docu["answer"]}\n\n'
                else:
                    chat_history+=docu['txt']+"\n"

        prompt=chat_history+'''\n### REQUIREMENTS
According to EXAMPLES, answer questions correctly without any explanation.
question: {input}
answer:'''
        ls.append(("human", prompt))
        return ls
    
    # Store one QA training pair (question + answer SQL + optional key pattern).
    def train_QA(self, collect_name,question, sql='',key=None,fun_name="qa") -> None:        
        """Create the collection if needed and persist one (question, sql, key) triple."""
        self.create_collection(collect_name,True)
        self.add_texts([(question,sql,key)],fun_name=fun_name)
    
    # Load a file or a whole directory tree into the vector store.
    def dir_upsert(self, dataDir):
        """Dispatch *dataDir* to the file or directory loader.

        BUG FIX: ``dir_upsert_file`` takes a required ``delimiters`` parameter;
        the old single-file branch omitted it and raised TypeError. The
        project-default delimiter set is now passed explicitly.
        """
        if dataDir.endswith('/'):
            dataDir = dataDir[:-1]
        if os.path.isfile(dataDir):
            self.dir_upsert_file(dataDir, r'[\n。?!？！]')
        else:
            self.dir_upsert_dir(dataDir)

    # Recursively load a file, or every file under a directory, into the store.
    def dir_upsert_dir(self, dataDir,delimiters=r'[\n。?!？！]'):
        """Walk *dataDir* depth-first and upsert every file, splitting on *delimiters*."""
        if os.path.isfile(dataDir):
            self.dir_upsert_file(dataDir, delimiters)
            return
        entries = os.listdir(dataDir)
        if dataDir.endswith('/'):
            dataDir = dataDir[:-1]
        for entry in entries:
            child = dataDir + "/" + entry
            if os.path.isfile(child):
                self.dir_upsert_file(child, delimiters)
            else:
                self.dir_upsert_dir(child, delimiters)

    # Load one file and split/store its contents in the vector store.
    def dir_upsert_file(self, dataDir,delimiters=r'[\n。?!？！]'):        
        """Pick a loader by file extension, load the file, then split and store it.

        Improvements: the extension check is now case-insensitive (handles
        ``.PDF``/``.Docx`` etc.), the long if/elif chain became a dispatch
        table, and ``delimiters`` gained the project-default value so
        single-file callers need not pass it (backward compatible).
        """
        loaders = {
            "pdf": PyPDFLoader,
            "docx": Docx2txtLoader,
            "xml": UnstructuredXMLLoader,
            "hpl": UnstructuredXMLLoader,
            "hwf": UnstructuredXMLLoader,
            "xlsx": UnstructuredExcelLoader,
        }
        ext = dataDir.split(".")[-1].lower()
        if ext == "json":
            loader = JSONLoader(dataDir, encoding="utf-8")
        elif ext in loaders:
            loader = loaders[ext](dataDir)
        else:
            # md / csv / txt and anything unknown: read as plain UTF-8 text.
            loader = TextLoader(dataDir, encoding="utf-8")
        documents = loader.load()        
        return self.split_document(dataDir,documents,delimiters)
    
    # Split documents on custom delimiters and persist each non-empty section.
    def split_document(self,docName,documents,delimiters = r'[\n。?!？！]'):
        """Regex-split every page into sections and store the non-blank ones."""
        sections = []
        for document in documents:
            for piece in re.split(delimiters, document.page_content):
                piece = piece.strip()
                if piece:
                    sections.append(piece)
        self.load_mes_vec(docName, sections)
        return self
        
    # Attempt a fix up to 3 times; give up if the answer still fails verification.
    def rag_fix(self,type,verify_api,vec_name,code,sys_prompt,usr_prompt,fix_prompt):
        """RAG-answer a prompt, verify via *verify_api*, and retry with feedback.

        BUG FIX: the comment promised three repair attempts, but there was no
        loop — the retry state (code/error/msg) was assigned and then the
        function immediately returned "Fix unsuccessful", so the assignments
        were dead code. The attempt cycle is now a real loop.
        """
        error = None
        msg = usr_prompt
        for _attempt in range(3):
            # Assemble the prompt from the current code/error/user sections.
            prompt = ""
            if code is not None and len(code) > 0:
                prompt = "##code\r\n" + code + "\r\n"
            if error is not None and len(error) > 0:
                prompt = prompt + "##error\r\n The error is as follows:" + error + "\r\n"
            prompt = prompt + "##human\r\n" + msg

            out = self.dir_query(type, vec_name, sys_prompt, prompt)
            if out is None:
                # RAG found nothing: fall back to a plain LLM chat.
                out = apiBase.llm_chat(sys_prompt, msg, code, error)

            if verify_api is None:
                return "the API is None"
            ret = apiBase.call_api(verify_api, out.page_content, True)
            if ret is None:
                return "The format returned by the API is incorrect"
            error = "".join(item['error'] for item in ret['data'])
            if error == "":
                return out
            # Feed this failure back into the next attempt.
            code = out
            error = apiBase.decode64(error)
            msg = fix_prompt
        return "Fix unsuccessful"
    
    # Persist strings into the vector store; vec_name names their source file.
    def load_mes_vec(self, vec_name, mes:list[str],fun_name="doc",key=''):
        """Create the collection, then add each message as a (txt, '', key) row."""
        self.create_collection(vec_name, True)
        # TODO: consider extracting keywords for the ``keys`` column here.
        for message in mes:
            self.add_texts([(message, '', key)], fun_name=fun_name)
        
    # Query against a list of files/directories, stopping at the first answer.
    def askDirs(self, dirs:list[str],sys_prompt:str,usr_prompt:str):        
        """Run the RAG query over each path until one of them yields an answer."""
        self.dir_query_result = None
        for path in dirs:
            if path.endswith('/'):
                path = path[:-1]
            if os.path.isfile(path):
                self.dir_query_file(path, sys_prompt, usr_prompt, "txt")
            else:
                self.dir_query_dir(path, sys_prompt, usr_prompt)
            if self.dir_query_result:
                break
        return self.dir_query_result
    
    # Recursively query every file under a directory.
    def dir_query_dir(self, dataDir, sys_prompt,usr_prompt):
        """Scan *dataDir* depth-first until some file produces an answer.

        BUG FIX: the old version reset ``dir_query_result`` to None at the top
        of every (recursive) call, wiping answers already found in sibling
        files, and kept scanning after an answer existed. The reset now lives
        only in the ``askDirs`` entry point, and the walk stops early.
        """
        for name in os.listdir(dataDir):
            child = dataDir + "/" + name
            if os.path.isfile(child):
                self.dir_query_file(child, sys_prompt, usr_prompt, "txt")
            else:
                self.dir_query_dir(child, sys_prompt, usr_prompt)
            if self.dir_query_result:
                break
        return self.dir_query_result
    
    # Query a single file (fixes typo 当个→单个 in the original note).
    def dir_query_file(self, dataDir, sys,usr, type):
        if not self.dir_query_result:
            return self.dir_query_result
        if dataDir.endswith('/'):
            dataDir = dataDir[:-1]
        # self.dir_query_result = index.run(prompt)        
        retriever = self.get_collect_retriever(dataDir)
        if sys is None or len(sys) == 0:
            system_prompt = apiBase.prompt_read("aigc/rag/system")
        else:
            system_prompt=sys
        if "{context}" not in system_prompt:
            system_prompt += "\r\n {context}"
        user_prompt=apiBase.prompt_read("aigc/rag/user")       
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", system_prompt),
                ("human", user_prompt),
            ]
        )
        question_answer_chain = create_stuff_documents_chain(self.llm, prompt)
        rag_chain = create_retrieval_chain(retriever, question_answer_chain)
        response = rag_chain.invoke({"input": usr})
        if len(response["context"]) == 0:
            return self.dir_query_result
        #if type == 'file':
        self.dir_query_result = response["answer"]
        # print(self.dir_query_result)
        self.dir_query_result = f'"answer":"{response["answer"]}","file":"{dataDir}"'
        return self.dir_query_result
        
    @staticmethod
    def cosine_relevance_score_fn(distance: float) -> float:
        """Normalize the distance to a score on a scale [0, 1]."""
        return distance
    
    def _select_relevance_score_fn(self) -> Callable[[float], float]:
        return self.cosine_relevance_score_fn
    # Get a retriever for the collection named after the doc path.
    def get_collect_retriever(self,dir,k=6):
        """Ensure the collection backing *dir* exists and return a retriever.

        Uses plain similarity search returning up to *k* documents.
        (A score-threshold search variant was considered and dropped.)
        """
        self.create_collection(dir)
        retriever = self.as_retriever(
            search_type="similarity",
            k=k,
        )
        return retriever
    
    # Dispatch a RAG command: load a directory, or query it.
    def call_rag(self, label, param):
        """Handle a RAG command.

        label: 'load' ingests *param* as a directory via dir_upsert;
               anything else treats *param* as "<dataDir> <prompt>".
        Raises IndexError when a query *param* has no prompt part.
        """
        apiBase.log(f"call_rag:{label} {param}")
        if label == 'load':
            return self.dir_upsert(param)
        # BUGFIX: split(maxsplit=1) keeps the entire prompt; the previous
        # plain split() discarded every word of the prompt after the first.
        lst = param.split(maxsplit=1)
        dataDir = lst[0]
        prompt = lst[1]
        # NOTE(review): dir_query is invoked with 4 args elsewhere in this
        # file — confirm this 2-arg call matches its signature.
        return self.dir_query(dataDir, prompt)
    
    # Convert a doc into QA pairs.
    def doc2qa(self, vcts:list[VectInfo],sys:str,doc:str):
        sys_prompt='You are an expert in question and answer generation'
# 你是问答对生成专家，以样例1,样例2,样例3作为参考，基于样例4提供的内
# 容，巧妙的设计若干问答对，问答对需要符合以下要求:
# 1.表格内容和表格下面的注的结合应该是有逻辑性的，不要强行关联
# 2.每个问答对包含“问题”和“答案”。“答案”中包含出处分析和结论两部分，出处分析应包含原文章节出处(具体到三级标题)
# 3.出处分析中涉及到表格内容的不要引用完整表格，而是输出表头以及关联数据所在的行，注意某些表格,比如样例3中的表格,实际表头可能由原始表头和原始表头下面的第一行或者多行组成,绝对不能遗漏
# 5.如果无法生成符合要求的问答对,不输出内容直接返回
# 6.如果生成多个符合要求的问答对，按照"问题: 答案:.. 问题: 答案:"的顺序输出。

        return "todo"
    
    @classmethod
    def from_documents(
        cls: Type[LlmDbVector],
        documents: List[Document],
        embedding: Optional[Embeddings] = None,
        ids: Optional[List[str]] = None,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        persist_directory: Optional[str] = None,
        collection_metadata: Optional[Dict] = None,
        **kwargs: Any,
    ) -> LlmDbVector:
        """Build a LlmDbVector store from *documents*.

        Splits each Document into its page_content and metadata, then
        delegates everything to :meth:`from_texts`.

        Returns:
            LlmDbVector: the constructed vectorstore.
        """
        texts, metadatas = [], []
        for document in documents:
            texts.append(document.page_content)
            metadatas.append(document.metadata)
        return cls.from_texts(
            texts=texts,
            embedding=embedding,
            metadatas=metadatas,
            ids=ids,
            collection_name=collection_name,
            persist_directory=persist_directory,
            collection_metadata=collection_metadata,
            **kwargs,
        )

