from datetime import datetime
from langchain.chains.question_answering.map_rerank_prompt import output_parser
from langchain.chains.sequential import SequentialChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain_core.callbacks import StreamingStdOutCallbackHandler
from langchain_ollama import OllamaLLM
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain,RouterChain
from langchain.memory import Memory, ConversationBufferMemory, ConversationSummaryMemory
import sqlite3

# Maximum number of history turns kept in the rolling conversation window
MAX_HISTORY_LENGTH = 5
# Counter of completed dialogue turns (drives dynamic memory depth below)
dialogue_count = 0

# Local Ollama model with streaming output to stdout.
# BUG FIX: the original first built a misconfigured OpenAI() client and
# immediately overwrote it with this OllamaLLM; the dead assignment is removed.
llm = OllamaLLM(
    model="qwen3:4b",
    temperature=0.3,
    num_ctx=4096,
    stream=True,
    callbacks=[StreamingStdOutCallbackHandler()]
)

# Embedding model used for vectorizing conversation text.
# embeddings_model = OpenAIEmbeddings()
embeddings_model = HuggingFaceEmbeddings(
            model_name="model/bge-large-zh",
            model_kwargs={'device': 'cuda'}   # change to 'cpu' if no GPU is available
        )

# Vector database for conversation storage/retrieval.
# NOTE(review): FAISS() cannot be constructed from an embedding model alone —
# it also needs an index and docstore; prefer
# FAISS.from_texts(["init"], embeddings_model). Confirm against FAISS API docs.
vector_store = FAISS(embeddings_model)

# Chat history buffer.
# BUG FIX: the original instantiated the abstract Memory() class (a TypeError
# at import time) and immediately overwrote it; the dead assignment is removed.
memory = ConversationBufferMemory()

# Summary memory: condenses the running conversation via the LLM.
summary_memory = ConversationSummaryMemory(llm=llm)
summary_memory.llm.max_tokens = 100   # cap summary length
summary_memory.llm.temperature = 0.5  # keep summaries stable

# Prompt template for knowledge-base retrieval requests.
# BUG FIX: the original swapped `input_variables` and `template`.
prompt_template = PromptTemplate(
    input_variables=["topic"],
    template="请在知识库中检索{topic}相关内容"
)
#流式输出回调函数
def stream_callback(response):
    """Echo each streamed chunk's text to stdout without adding newlines.

    Expects an iterable of chunks shaped like {"choices": [{"text": ...}]}.
    """
    for piece in response:
        text = piece["choices"][0]["text"]
        print(text, end="")
#汇总流式输出内容
def aggregate_stream_data(response):
    """Collect streamed chunks, echoing each one, and return the full text."""
    pieces = []
    for chunk in response:
        piece = chunk["choices"][0]["text"]
        pieces.append(piece)
        print(piece)
    full_content = "".join(pieces)
    print(f"完整内容{full_content}")
    return full_content
#示例 请求生成内容并汇总流式输出
# response = llm(prompt_template,callback=stream_callback)

#向量化并存储用户输入和模型输出
def store_conversation_as_vector(vector_store, user_input, model_response):
    """Store one conversation turn (user input + model reply) in the vector store.

    BUG FIX: the original embedded the texts via a nonexistent
    `embeddings_model.embed` and then passed the *vectors* to add_texts() as if
    they were texts. FAISS.add_texts() expects raw strings and embeds them
    internally with the store's own embedding function.
    """
    vector_store.add_texts([user_input, model_response])
def retrieve_relevant_conversation(vector_store, user_input):
    """Return the two stored conversation entries most similar to `user_input`.

    BUG FIX: the original called a nonexistent `embeddings_model.embed` and a
    misspelled `silimar_search`. FAISS exposes similarity_search(query, k=...),
    which takes the raw query string and embeds it internally.
    """
    # Return the two most similar stored entries.
    return vector_store.similarity_search(user_input, k=2)
#定义对话存储和检索
def update_and_retrieve(vector_store, user_input, model_response, query):
    """Persist the latest exchange, then fetch history most relevant to `query`."""
    # Save the new exchange first so it is immediately searchable.
    store_conversation_as_vector(vector_store, user_input, model_response)
    # Look up past conversation content related to the query.
    return retrieve_relevant_conversation(vector_store, query)
#自定义清理函数
def clean_old_vectors(vector_store,max_entries = 10):
    #向量存储库只保留最近max_entries条数据
    if len(vector_store) > max_entries:
        #移除较早的向量
        vector_store.remove_oldest_entries(len(vector_store) - max_entries)
# Rolling conversation history shared by the prompt/response helpers below
conversation_history = []
#根据轮数调整深度
def adjust_memory_depth(memory, max_depth=5):
    """Cap the memory's retention window at `max_depth` turns.

    NOTE: ConversationBufferMemory has no `buffer_window` attribute of its own;
    this simply sets it on the object — confirm the intended memory class.
    """
    setattr(memory, "buffer_window", max_depth)
#动态调整记忆深度
def dynamic_memory_update(user_input, model_response):
    """Record one exchange and shrink the memory depth as the dialogue grows.

    Depth schedule: turns 1-5 keep 5, turns 6-10 keep 3, afterwards keep 2.
    """
    global dialogue_count
    dialogue_count += 1

    # Pick the retention depth for the current turn count.
    if dialogue_count <= 5:
        depth = 5
    elif dialogue_count <= 10:
        depth = 3
    else:
        depth = 2
    adjust_memory_depth(memory, max_depth=depth)
    # Persist the current exchange into the buffer memory.
    memory.save_context({"input": user_input}, {"output": model_response})
#自定义消息存储函数,带有时间戳
def add_timestamped_message(memory,user_input,model_response):
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    memory.save_content(
        {"input":f"{timestamp}用户：{user_input}"},
        {"output":f"{timestamp}用户：{model_response}"}
    )

def generate_conversation_prompt(user_input):
    """Append the user's turn to the shared history and build a prompt from it.

    BUG FIX: the original appended the raw string to conversation_history, but
    the loop below (and generate_response) treat entries as
    {"role": ..., "content": ...} dicts, so `turn['role']` would raise
    TypeError on string entries. Store a dict instead.
    """
    conversation_history.append({"role": "用户", "content": user_input})
    # Drop the oldest turn once we exceed the history window.
    if len(conversation_history) > MAX_HISTORY_LENGTH:
        conversation_history.pop(0)
    prompt = "以下是用户和助手的对话历史"
    for turn in conversation_history:
        prompt += f"{turn['role']}:{turn['content']}\n"
    prompt += "助手："
    return prompt
def generate_response(user_input):
    """Generate an assistant reply for `user_input` and record it in history.

    BUG FIX: LLM objects have no `.prompt()` method, and OllamaLLM.invoke()
    returns a plain string rather than an OpenAI-style completion object with
    `.choices` — call invoke() and strip the returned text directly.
    """
    prompt = generate_conversation_prompt(user_input)
    response = llm.invoke(prompt)
    assistant_response = response.strip()
    conversation_history.append({"role": "助手", "content": assistant_response})
    return assistant_response

#预处理
# Input preprocessing.
def preprocess(user_input):
    """Normalize user text: trim surrounding whitespace, lowercase everything,
    then capitalize the first character."""
    # capitalize() lowercases the tail, so chaining after lower() is equivalent.
    return user_input.strip().lower().capitalize()
#在数据库中查找
def query_database(keyword):
    conn = sqlite3.connect("database.db")
    cursor = conn.cursor()
    cursor.execute("SELECT info FROM topics WHERE name=?",(keyword,))
    result = cursor.fetchone()
    conn.close()
    return result[0] if result else '没有找到数据'
#路由
def determine_router(input_text):
    if "查询" in input_text:
        return "default"

# Chat chain with summary memory.
# NOTE(review): this LLMChain is immediately overwritten by the pipeline
# assignment below, so it never runs — one of the two assignments is dead.
chain = LLMChain(llm=llm,prompt=prompt_template,memory=summary_memory)

# NOTE(review): piping a FAISS store directly into a PromptTemplate is not a
# valid LCEL graph — normally a retriever (vector_store.as_retriever()) and an
# input mapping are needed here; confirm against the LangChain LCEL docs.
chain = (vector_store | prompt_template) | llm | output_parser
result = chain.invoke({"topic":"问题"})
print(result)

# Initialize the sequential chain.
# NOTE(review): SequentialChain expects `chains=[...]` plus input/output
# variable names, not llm/prompt kwargs — verify against the SequentialChain API.
sequential_chain = SequentialChain(llm=llm,prompt=prompt_template,memory=memory)

# Route map: maps each router key to its sub-chain.
router_map = {
    "default": sequential_chain
}
# Build the router chain.
# NOTE(review): determine_router returns None for inputs without "查询",
# and router_map has no None entry — routing would fail on such inputs.
router_chain = RouterChain(router_map=router_map,router_function=determine_router)
# NOTE(review): .run() is called with no input variables — this will raise.
result = sequential_chain.run()
print(result)
# NOTE(review): load_memory_variables requires an inputs dict, e.g. ({}),
# as the summary_memory call below correctly does.
print("动态调整后的历史",memory.load_memory_variables())
# Show the generated conversation summary.
print(f"生成的会话摘要{summary_memory.load_memory_variables({})}")
# Clear the conversation history.
memory.clear()