# Standard library
import os
import re

# Third-party
import chromadb
from llama_index.core import SimpleDirectoryReader, Settings, VectorStoreIndex, StorageContext
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.tools import FunctionTool
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.deepseek import DeepSeek
from llama_index.readers.file import PyMuPDFReader
from llama_index.vector_stores.chroma import ChromaVectorStore
# from llama_index.llms.ollama import Ollama
# import nest_asyncio
# nest_asyncio.apply()


# import sys
# from pathlib import Path

# # 获取项目根目录（假设 question.py 在 agent/ 下）
# project_root = Path(__file__).parent.parent  # 向上两级到 my_project/
# sys.path.append(str(project_root))  # 临时添加路径

from config import mysql

def init(
    input_dir="c:/Users/大糊/Desktop/llm_data/",
    db_path="./db",
    collection_name="animal-plant-identification",
    embed_model_path="E:/huggingface_cache/BAAI/bge-small-zh-v1.5",
):
    """Build the RAG pipeline and return a chat engine with conversation memory.

    Loads .txt documents from *input_dir*, splits them into overlapping
    sentence chunks, embeds them into a persistent Chroma collection,
    configures the DeepSeek LLM globally, and returns a context-mode chat
    engine over the resulting vector index.

    Args:
        input_dir: Directory containing the .txt knowledge-base files.
        db_path: Filesystem path of the persistent Chroma database.
        collection_name: Chroma collection to create or reuse.
        embed_model_path: Local path (or HF model id) of the embedding model.

    Returns:
        A llama_index chat engine ready for ``.chat()`` calls.
    """
    # Document loading: only .txt files, no recursion into subdirectories.
    reader = SimpleDirectoryReader(
        input_dir=input_dir,
        recursive=False,
        required_exts=[".txt"],
    )
    documents = reader.load_data()

    # Split into 500-char chunks with 50-char overlap so retrieval keeps
    # sentence context across chunk boundaries.
    node_parser = SentenceSplitter(chunk_size=500, chunk_overlap=50)
    nodes = node_parser(documents)

    # Vector storage: persistent Chroma collection on disk.
    chroma_client = chromadb.PersistentClient(path=db_path)
    vector_store = ChromaVectorStore(
        chroma_collection=chroma_client.get_or_create_collection(collection_name)
    )
    storage_context = StorageContext.from_defaults(vector_store=vector_store)

    # Model configuration.
    # SECURITY: prefer the DEEPSEEK_API_KEY environment variable. The inline
    # fallback preserves old behavior, but a real key must never live in
    # source control — rotate the leaked key and delete the fallback.
    Settings.embed_model = HuggingFaceEmbedding(model_name=embed_model_path)
    Settings.llm = DeepSeek(
        api_key=os.environ.get("DEEPSEEK_API_KEY", "sk-e56d121210a245bf98505acc24955090"),
        model="deepseek-chat",
        temperature=0.7,
        max_tokens=2000,
    )

    # Build the index over the pre-split nodes.
    index = VectorStoreIndex(nodes=nodes, storage_context=storage_context)

    # Conversation memory capped at 2000 tokens.
    memory = ChatMemoryBuffer.from_defaults(token_limit=2000)

    # Context chat mode: top-5 retrieved chunks are injected into each turn.
    chat_engine = index.as_chat_engine(
        chat_mode="context",
        memory=memory,
        similarity_top_k=5,
        streaming=True,
    )
    print("AI已就绪，输入问题开始对话")
    return chat_engine


def getAnswer(query_engine, query):
    """Send *query* to the chat engine and post-process the answer.

    If the LLM answer contains fenced ```sql``` code blocks, each block is
    executed through the ``query_mysql`` tool via the configured LLM and the
    list of execution results is returned. Otherwise the plain textual answer
    is returned, prefixed with "常规输出:".

    Args:
        query_engine: A chat engine exposing ``.chat(query)``.
        query: The user's question.

    Returns:
        Either a list of SQL execution results, or a "常规输出:"-prefixed string.
    """
    response = query_engine.chat(query)
    response_text = str(response)

    # Extract every ```sql ... ``` fenced block from the LLM answer.
    sql_blocks = re.findall(r'```sql\n(.*?)\n```', response_text, re.DOTALL)

    if not sql_blocks:
        # No SQL in the answer: return the plain text.
        if hasattr(response, 'response'):  # StreamingResponse-like objects
            return "常规输出:" + str(response.response)
        elif isinstance(response, str):
            return "常规输出:" + response
        else:
            return "常规输出:" + str(response)

    # Build the MySQL tool once, not per SQL block (it is loop-invariant).
    query_mysql_tool = FunctionTool.from_defaults(fn=mysql.query_mysql, name="query_mysql")

    sql_exec_result = []
    for i, sql in enumerate(sql_blocks, 1):
        sql_str = sql.strip()
        print(f"\nSQL代码块 {i}:\n{sql_str}\n")
        print("准备执行SQL...")
        # Let the LLM decide how to call the tool with this SQL statement.
        ret = Settings.llm.predict_and_call(
            [query_mysql_tool],
            sql_str,
        )
        sql_exec_result.append(ret)

    return sql_exec_result
    

def reset_memory(chat_engine):
    """Clear the chat engine's conversation memory and report it."""
    chat_engine.reset()
    print("对话记忆已重置")

def show_memory(chat_engine):
    """Print every message currently held in the engine's conversation memory."""
    print("当前对话记忆:")
    for position, message in enumerate(chat_engine.memory.get(), start=1):
        print(f"{position}. {message.role}: {message.content}")

# if __name__ == "__main__":
#     query_engine = init()
#     res = getAnswer(query_engine,"我想查询丹顶鹤的全量数据，请输出sql查询语句，要包含媒体文件信息")
