# import json
# from langchain_ollama import OllamaEmbeddings
# from framework.prompt.DocLoader import DocLoader
# from framework.LLM_controller.interface import LLMcontroller
# from framework.LLM_controller.providers import deepseek, openai, ollama
# import tool.config.config as cfg
# from langchain.schema import Document

# config = cfg.get_config()

# RAG_TEMPLATE = """
#     You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If the context is null, just say that you don't know.

#     <context>
#     {context}
#     </context>

#     Answer the following question:

#     {question}"""

# local_embeddings = OllamaEmbeddings(model="bge-m3:latest",base_url="http://localhost:11434")

# docloader = DocLoader("./chroma_db",local_embeddings)

# # docloader.add_documents_to_vectorstore("./uploads/helloyutao/20250714172406.png")
# # print("文档已添加到向量存储")

# question = "现在你作为一名老师，以老师的口吻讲解一下这道题"
# print(f"question is:{question}")
# file = "./uploads/helloyutao/20250714172406.png"
# all_data = docloader.vectostore.get()
# docs = []
# for content, metadata in zip(all_data["documents"], all_data["metadatas"]):
#     if metadata.get("source") == file:
#         docs.append(Document(page_content=content, metadata=metadata))

# context_str = "\n".join(doc.page_content for doc in docs)

# print(f"check db:{context_str}")

# # docloader.add_documents_to_vectorstore("./framework/data/终稿_彭玉涛_20250525195955.pdf","./chroma_db")

# # promptController = PromptController(docloader,RAG_TEMPLATE)

# llm =  LLMcontroller()

# llm.register_model("deepseek", deepseek.handler_factory)
# llm.register_model("openai", openai.handler_factory)
# llm.register_model("ollama", ollama.handler_factory)

# llm.acceptQuery(query=RAG_TEMPLATE.format(context=context_str,question=question),provider="ollama",model="qwen3:8b")

# for chunk in llm.stream():
#     print(json.loads(chunk)["message"]["content"], end="", flush=True)

# listener = VoiceRecorder()

# waker = Waker(config['AUDIO']['WAKE_WORDS'],config['AUDIO']['SLEEP_WORDS'])

# history = []

# def start_chat():
#     while True:
#         # question = input("\n>> ")
#         question = listener.start_recording()
#         if question=="":
#             continue
#         if waker.sleep(question):#匹配休眠模式
#             break
#         print(f"question is:{question}")
#         message = promptController.gen_message(question)
#         model.acceptQuery(message)
#         for chunk in model.stream():
#             print(chunk, end="", flush=True)
#         # response += chunk

if __name__ == "__main__":
    # while True:
    #     question = listener.start_recording()
    #     if waker.wake(question):  # matches the wake-word pattern
    #         print("wake word triggered")
    #         start_chat()
    # Debug utility: enumerate the host's audio devices so the right
    # input-device index can be picked for the voice recorder.
    import pyaudio

    p = pyaudio.PyAudio()
    try:
        for i in range(p.get_device_count()):
            print(p.get_device_info_by_index(i))
    finally:
        # PyAudio wraps PortAudio; terminate() must be called to release
        # the native audio resources even if a device query raises.
        p.terminate()