from typing import Dict,List

from langchain_core.output_parsers import BaseOutputParser
from langchain_core.runnables import RunnableSequence, RunnablePassthrough
from langchain_ollama import ChatOllama
from langchain.chains import LLMChain
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder, SystemMessagePromptTemplate
)
from langchain.memory import ConversationBufferWindowMemory
from . import prompt_config
from .class_of_prompt import sqlTemplate
# Module-level prompt configuration. NOTE: this must be constructed before
# create_conversation_chain is defined below, because its default arguments
# (params.modelname, params.max_history) are evaluated at function-definition time.
params = prompt_config.PromptConfig()
from .class_of_prompt.sqlTemplate import sqlPromptTemplate
from .output_template import SqlErrorSolutionParser
import re
def create_conversation_chain(free_prompt, model_name=params.modelname, max_history=params.max_history):
    """
    Create a conversation chain backed by a windowed conversation memory.

    :param free_prompt: system-prompt template string used as the system message
    :param model_name: name of the Ollama chat model to use
    :param max_history: maximum number of history messages retained in memory
    :return: tuple of (conversation_chain, memory) — the configured chain and
             its memory object (the caller is responsible for saving turns into
             the memory after each invocation)
    """
    # Compose a proper chat prompt: system message, rolling history, then the
    # current user input. The original passed the raw `free_prompt` string
    # directly into RunnableSequence, which is not a runnable prompt step.
    system_message_prompt = SystemMessagePromptTemplate.from_template(free_prompt)
    chat_prompt = ChatPromptTemplate.from_messages([
        system_message_prompt,
        MessagesPlaceholder(variable_name="chat_history"),
        HumanMessagePromptTemplate.from_template("{input}"),
    ])

    # Windowed memory: only the most recent `max_history` messages are kept.
    memory = ConversationBufferWindowMemory(
        memory_key="chat_history",
        return_messages=True,
        k=max_history,
    )

    def load_memory(inputs):
        """Load the stored chat history for injection into the prompt."""
        return memory.load_memory_variables({})["chat_history"]

    # The original body referenced an undefined name `llm` (NameError at call
    # time); instantiate the chat model here from the configured model name.
    llm = ChatOllama(model=model_name)

    # Chain: inject history into `chat_history` -> format prompt -> call model.
    # (An output parser such as SqlErrorSolutionParser can be appended as a
    # final step if structured output is required.)
    conversation_chain = RunnableSequence(
        RunnablePassthrough.assign(chat_history=load_memory),
        chat_prompt,
        llm,
    )

    return conversation_chain, memory