
def get_completion(client, prompt, stream, model="sailor2:20b"):
    """Wrap the OpenAI-compatible chat-completions API.

    Returns the raw streaming response object when ``stream`` is truthy,
    otherwise the assistant message text of the first choice.
    """
    messages = [{"role": "user", "content": prompt}]

    if stream:
        # Streaming mode: hand the response iterator straight back to the caller.
        return client.chat.completions.create(
            messages=messages,
            model=model,
            stream=stream,
            temperature=0.1,  # low temperature keeps outputs near-deterministic
        )

    # Non-streaming mode: unwrap the first choice's message text.
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=0.1,  # low temperature keeps outputs near-deterministic
    )
    return completion.choices[0].message.content

### 3.4 Prompt template
def build_prompt(prompt_template, **kwargs):
    """Fill a prompt template with keyword values.

    Values that are lists of strings are joined with blank lines before
    substitution; every other value is passed through unchanged.
    """
    def _render(value):
        # Collapse a homogeneous string list into one blank-line-separated chunk.
        if isinstance(value, list) and all(isinstance(item, str) for item in value):
            return '\n\n'.join(value)
        return value

    rendered = {key: _render(value) for key, value in kwargs.items()}
    return prompt_template.format(**rendered)


# RAG prompt template: {context} is filled with retrieved passages and
# {query} with the user's question. The '#用户问' line is literal template
# text sent to the model, not a Python comment.
prompt_template = '''
{context}

#用户问：{query}
'''
#Please answer the user's question in Chinese.
#Your task is to answer the user's question based on the information above.
#Requirement: if the answer is not contained, reply with the empty string " ".
#Requirement: if the known information does not contain the answer, or is insufficient to answer, reply with the empty string " ".
#Do not output information or answers that are not in the known information.

import logging

# Module-wide logging configuration: INFO level, per-module named logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def get_embeddings(client, texts, model="bge-m3:567m", dimensions=None, batch_size=1):
    """Embed ``texts`` in batches via an OpenAI-compatible embeddings API.

    Args:
        client: API client exposing ``client.embeddings.create``.
        texts: list of strings to embed.
        model: embedding model name.
        dimensions: optional output dimensionality; omitted from the request
            when ``None`` (some OpenAI-compatible servers reject an explicit
            null ``dimensions`` — the original always forwarded it).
        batch_size: number of texts sent per API call.

    Returns:
        A list of embedding vectors, one per input text, in input order.

    Raises:
        Re-raises any exception from the API call after logging it.
    """
    log = logging.getLogger(__name__)
    all_embeddings = []
    log.info('Embedding %d texts in total', len(texts))
    for i in range(0, len(texts), batch_size):
        batch_texts = texts[i:i + batch_size]
        batch_no = i // batch_size + 1
        log.info('Processing batch %d (%d texts)', batch_no, len(batch_texts))
        # BUG FIX: only forward `dimensions` when the caller actually set it;
        # an explicit dimensions=None is rejected by some servers.
        request_kwargs = {'input': batch_texts, 'model': model}
        if dimensions is not None:
            request_kwargs['dimensions'] = dimensions
        try:
            response = client.embeddings.create(**request_kwargs)
            all_embeddings.extend(item.embedding for item in response.data)
        except Exception:
            log.exception('Error while processing batch %d', batch_no)
            raise

    log.info('Generated %d embedding vectors in total', len(all_embeddings))
    return all_embeddings

### 4.4 Vector-retrieval-based RAG — chromadb needs no VPN/proxy; version 0.4.15
import chromadb
from chromadb.config import Settings
from chromadb import PersistentClient
import math
class MyVectorDBConnector:
    """Thin wrapper around a chromadb HTTP collection plus an embedding function.

    Setup notes from the original author:
        conda activate agiclass
        chroma run --path D:/db_path --port 8066
        chroma_client = chromadb.Client(Settings(allow_reset=True))

    The chroma server must already be running at ``host:port``.
    """

    def __init__(self, client, collection_name, embedding_fn, reset=False,
                 path="D:/db_path", host='localhost', port=8066):
        """Connect to (or create) the named collection.

        Args:
            client: embedding API client, forwarded to ``embedding_fn``.
            collection_name: chroma collection to use.
            embedding_fn: callable ``(client, texts) -> list of vectors``.
            reset: when True, delete the collection from the local
                persistent store.
            path: on-disk path for the local PersistentClient.
            host/port: address of the running chroma HTTP server.
        """
        print(chromadb.__version__)
        self.collection_name = collection_name
        self.reset = reset
        self.path = path
        self.host = host
        self.port = port
        self.client = client
        self.embedding_fn = embedding_fn
        # The local persistent client is only used to list/delete collections;
        # the HTTP client (server must be running) serves all queries.
        PClient = PersistentClient(path=path, settings=chromadb.Settings(allow_reset=True))
        chroma_client = chromadb.HttpClient(host=host, port=port)
        collections = PClient.list_collections()
        print(collections)
        self.collection = chroma_client.get_or_create_collection(name=collection_name)
        document_count = self.collection.count()
        print(f"{collection_name}当前集合中的数据量: {document_count}")
        if reset:
            # NOTE(review): this deletes the collection from the *local*
            # persistent store, not from the HTTP server the queries go to —
            # confirm that asymmetry is intentional.
            PClient.delete_collection(name=collection_name)

    def add_documents(self, document):
        """Add documents and their embeddings to the collection."""
        document_count = self.collection.count()
        print(f"当前集合中的数据量add_bofaer: {document_count}")
        embedding = self.embedding_fn(self.client, document)
        end = document_count + len(document)
        # Ids continue from the current count so repeated calls do not collide.
        self.collection.add(
            embeddings=embedding,
            documents=document,
            ids=[f"id{i}" for i in range(document_count, end)],
        )
        document_count = self.collection.count()
        print(f"当前集合中的数据量add_documents: {document_count}")

    def search(self, query, top_n):
        """Query the vector DB for ``query``.

        Returns the chroma results dict, or a one-element list with an
        error message on failure.
        """
        print("检索")
        try:
            embedding = self.embedding_fn(self.client, [query])
            return self.collection.query(
                query_embeddings=embedding,
                n_results=top_n,
            )
        except Exception as e:
            # BUG FIX: the original assigned this error marker but fell
            # through without a return, so callers silently got None.
            return [f"检索失败: {e}"]

# 3. RRF-based fusion ranking
def rrf(ranks, k=1):
    """Reciprocal-rank fusion of several ranked result sets.

    Each element of ``ranks`` maps doc id -> {"text": ..., "rank": ...}.
    Returns a dict ordered by descending fused score, each value holding
    the accumulated "score" and the document "text".
    """
    fused = {}
    for ranking in ranks:
        for doc_id, info in ranking.items():
            entry = fused.setdefault(doc_id, {"score": 0, "text": info["text"]})
            # Each appearance contributes 1/(k + rank) to the fused score.
            entry["score"] += 1.0 / (k + info["rank"])
    ordered = sorted(fused.items(), key=lambda kv: kv[1]["score"], reverse=True)
    return dict(ordered)


import json
def userQuery_AI(client, userQuery, corcName, LLM, stream=False, prompt=prompt_template, vector=True, keyword=False):
    """Answer a user query via RAG: vector retrieval -> prompt -> LLM.

    Args:
        client: OpenAI-compatible client used for both embeddings and chat.
        userQuery: the user's question.
        corcName: chroma collection name to search.
        LLM: chat model name.
        stream: forwarded to ``get_completion``.
        prompt: template with ``{context}`` and ``{query}`` slots.
        vector: run vector retrieval. Must be True to produce an answer.
        keyword: accepted for interface compatibility; keyword retrieval
            was never implemented (the original's RRF fusion result was
            immediately overwritten, i.e. dead code).

    Returns:
        The LLM response, or None when retrieval is disabled or a
        TypeError occurred (matching the original error handling).
    """
    try:
        if not vector:
            # BUG FIX: the original fell through to an undefined variable
            # (uncaught NameError) on this path; fail cleanly instead.
            print("向量检索未启用，无法回答")
            return None

        # 1. Vector retrieval: top 50 candidates from the collection.
        vecdb_connector = MyVectorDBConnector(client, corcName, get_embeddings)
        hits = vecdb_connector.search(userQuery, 50)
        ranked = [{"text": doc, "rank": i} for i, doc in enumerate(hits["documents"][0])]

        # Keep only the top-3 hits as context (effective original behavior:
        # the rrf() fusion call's result was discarded).
        top_hits = ranked[0:3]

        # 2. Build the prompt from the retrieved context and the question.
        prompt_user = build_prompt(
            prompt,
            context=json.dumps(top_hits, indent=4, ensure_ascii=False),
            query=userQuery,
        )

        # 3. Call the LLM.
        return get_completion(client, prompt_user, stream, LLM)
    except TypeError as e:
        print(f"捕获到TypeError: {e}")
#asd =  userQuery_AI('公司名称是？','shop36','sailor2:20b')  
#print("公司名称是？",asd)

    