import asyncio
import os
from langchain_community.document_loaders import DirectoryLoader
from langchain_community.llms import ChatGLM
from langchain.prompts import PromptTemplate
from langchain.text_splitter import CharacterTextSplitter

from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains import RetrievalQA
import json
import websockets

# Module-level state populated lazily by text_get():
#   db — the Chroma vector store
#   qa — the RetrievalQA chain used by the WebSocket handler
db = None
qa = None



# Embedding model registry: short alias -> HuggingFace hub name or a local
# checkpoint path (used by load_embedding_model).
embedding_model_dict = {
    "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
    "ernie-base": "nghuyong/ernie-3.0-base-zh",
    "text2vec": "GanymedeNil/text2vec-large-chinese",
    "text2vec2": "uer/sbert-base-chinese-nli",
    "text2vec3": r"D:\AtomGit\model\embedding\text2vec-base-chinese",
}



def load_documents(directory="books", chunk_size=256, chunk_overlap=0):
    """Load all documents under *directory* and split them into chunks.

    Args:
        directory: Folder scanned by DirectoryLoader (defaults to "books").
        chunk_size: Maximum characters per chunk (default 256, the original
            hard-coded value).
        chunk_overlap: Characters shared between consecutive chunks
            (default 0, the original hard-coded value).

    Returns:
        list: The split document chunks, ready for embedding.
    """
    loader = DirectoryLoader(directory)
    documents = loader.load()
    # NOTE: CharacterTextSplitter splits on separators first, so individual
    # chunks may exceed chunk_size slightly — same behaviour as before.
    text_splitter = CharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    return text_splitter.split_documents(documents)

def load_embedding_model(model_name):
    """Instantiate a HuggingFaceEmbeddings for a registered model alias.

    Args:
        model_name: Key into ``embedding_model_dict`` (e.g. "text2vec3").

    Returns:
        HuggingFaceEmbeddings: CPU embedder (no device is specified on
        purpose — CPU is the default).

    Raises:
        ValueError: If *model_name* is not a known alias (previously this
            surfaced as an uninformative KeyError).
    """
    try:
        model_path = embedding_model_dict[model_name]
    except KeyError:
        raise ValueError(
            f"Unknown embedding model alias: {model_name!r}; "
            f"expected one of {sorted(embedding_model_dict)}"
        ) from None
    encode_kwargs = {"normalize_embeddings": False}
    return HuggingFaceEmbeddings(
        model_name=model_path,
        encode_kwargs=encode_kwargs
    )

# Load the embedding model at import time (local text2vec-base-chinese
# checkpoint — see embedding_model_dict["text2vec3"]).
embeddings = load_embedding_model('text2vec3')
# The vector store itself is loaded or created later, in text_get().

print("Embedding模型加载成功")
# Create the language model client; a ChatGLM server must already be
# listening at this endpoint.
llm = ChatGLM(
    endpoint_url='http://127.0.0.1:8000',
    max_token=80000,
    top_p=0.9
)

def store_chroma(docs, embeddings, persist_directory="VectorStore"):
    """Embed *docs* into a fresh Chroma collection and flush it to disk.

    Args:
        docs: Split document chunks to index.
        embeddings: Embedding function used to vectorize the chunks.
        persist_directory: On-disk location for the collection.

    Returns:
        Chroma: The persisted vector store.
    """
    vector_store = Chroma.from_documents(
        docs,
        embeddings,
        persist_directory=persist_directory,
    )
    vector_store.persist()
    return vector_store


def text_get(text_path):
    """Build (or reopen) the vector store and wire up the global QA chain.

    On the first run, documents under *text_path* are embedded into a new
    Chroma store persisted to 'VectorStore'; on subsequent runs the
    persisted store is reopened directly and *text_path* is ignored.

    Args:
        text_path: Directory containing the source documents.

    Side effects:
        Sets the module globals ``db`` and ``qa``.
    """
    global db
    global qa
    if not os.path.exists('VectorStore'):
        documents = load_documents(text_path)
        db = store_chroma(documents, embeddings)
    else:
        db = Chroma(persist_directory='VectorStore', embedding_function=embeddings)
    # Prompt fixes: 简介 ("introduction") -> 简洁 ("concise"), and the
    # reversed Chinese quotation marks ”…“ -> “…”.
    QA_CHAIN_PROMPT = PromptTemplate.from_template("""根据下面的上下文（context）内容回答问题。
    如果你不知道答案，就回答不知道，不要试图编造答案。
    答案最多3句话，保持答案简洁。
    总是在答案结束时说“谢谢你的提问！”
    {context}
    问题：{question}
    """)
    retriever = db.as_retriever()
    qa = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=retriever,
        verbose=True,
        chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
    )




async def chat_handler(websocket):
    """Per-connection loop: receive a question, answer it, send the reply.

    Runs until the peer closes the connection, at which point the
    ConnectionClosed exception ends the loop cleanly.
    """
    try:
        print("WebSocket连接已建立")
        while True:
            question = await websocket.recv()
            # Log the incoming question on the server side.
            print(f"用户：{question}")
            answer = await generate_response(question)
            await websocket.send(answer)
    except websockets.exceptions.ConnectionClosed:
        print("WebSocket连接已关闭")

async def generate_response(user_input):
    """Run the RetrievalQA chain on *user_input* and JSON-encode the answer.

    ``qa.run`` is a synchronous, blocking call (HTTP round-trip to the
    ChatGLM endpoint). Running it directly inside a coroutine would stall
    the whole event loop — and every other connected client — for the full
    duration of the LLM call, so it is offloaded to the default executor.

    Args:
        user_input: The user's question text.

    Returns:
        str: The model's answer, serialized with json.dumps.
    """
    loop = asyncio.get_running_loop()
    response = await loop.run_in_executor(None, qa.run, user_input)
    return json.dumps(response)

    
# Initialize the vector store and the QA chain BEFORE accepting connections.
# Without this call `qa` stays None and the first incoming message crashes
# the handler with AttributeError.
text_get('books')


async def _main():
    """Serve the chat handler on ws://localhost:8765 until interrupted."""
    async with websockets.serve(chat_handler, "localhost", 8765):
        await asyncio.Future()  # run forever


# asyncio.run replaces the deprecated get_event_loop()/run_until_complete
# pattern and guarantees proper loop shutdown.
asyncio.run(_main())
