
import init_env_impl
from langchain_openai import ChatOpenAI
# Keep the model identifier in its own constant instead of rebinding the
# same name `model` from a string to the client object (the original
# shadowed the string, which was confusing and made the name unusable later).
MODEL_NAME = "deepseek-chat"
model = ChatOpenAI(model=MODEL_NAME)
"""

        PromptTemplate,ChatPromptTemplate

"""
# # PromptTemplate 单轮任务（如文本生成、问答补全等），无需区分角色（如用户/系统/AI）。
# # ChatPromptTemplate 多轮对话、需要区分角色（如系统设置、用户提问、AI回复）的场景
# from langchain_core.prompts import PromptTemplate,ChatPromptTemplate

# model = ChatOpenAI(model="deepseek-chat")

# # prompt_template = ChatPromptTemplate.from_messages(
# #     [
# #         ("system","{system_input}"),
# #         ("user","{input}")
# #     ]
# # )


# prompt_template = PromptTemplate.from_template("""
# {system_input}
# {input}
# """)

# prompt = prompt_template.invoke({"system_input":"你是一个翻译,翻译下面这个句子或单词","input":"apple"})
# print(prompt)




# from langchain_community.tools.tavily_search import TavilySearchResults
# from langchain_core.prompts import ChatPromptTemplate
# from langchain.globals import set_debug
# from langchain.agents import AgentExecutor, create_tool_calling_agent

# prompt_template = ChatPromptTemplate.from_messages(
#     [
#         ("system", "{system_input}"),
#         ("placeholder", "{chat_history}"),
#         ("user","{input}"),
#         ("placeholder","{agent_scratchpad}")
#     ]
# )

# tools = [TavilySearchResults(max_results=1)]

# agent = create_tool_calling_agent(model, tools, prompt_template)
# set_debug(True)  # -----------> 打印日志
# agent_executor = AgentExecutor(agent=agent, tools=tools)
# response = agent_executor.invoke({"input": "谁执导了2023年的电影《奥本海默》，他多少岁了？","system_input":"你是一位得力的助手。"})

# print(response)








# from langchain.agents import AgentExecutor, create_tool_calling_agent
# from langchain_community.tools.tavily_search import TavilySearchResults
# from langchain_core.prompts import ChatPromptTemplate
# from langchain.globals import set_debug
# from langchain.globals import set_verbose

# llm = ChatOpenAI(model="deepseek-chat")
# tools = [TavilySearchResults(max_results=1)]
# prompt = ChatPromptTemplate.from_messages(
    # [
    #     ("system", "你是一位得力的助手。",),
    #     ("placeholder", "{chat_history}"),
    #     ("human", "{input}"),
    #     ("placeholder", "{agent_scratchpad}"),
    # ]
# )
# # 构建工具代理
# agent = create_tool_calling_agent(llm, tools, prompt)
# # 打印调试日志
# set_debug(True)
# # 不输出详细日志
# #set_verbose(False)
# # 通过传入代理和工具来创建代理执行器
# agent_executor = AgentExecutor(agent=agent, tools=tools)
# response = agent_executor.invoke(
#     {"input": "谁执导了2023年的电影《奥本海默》，他多少岁了？"}
# )
# print(response)





# from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
# from langchain_core.prompts import ChatPromptTemplate

# prompt_template = ChatPromptTemplate.from_messages(
#     [
#         ("system","{sys_input}"),
#         ("user","{user_input}")
#     ]
# )

# print(prompt_template.invoke({"sys_input":"你是一个翻译助手，翻译下面的英文单词或句子","user_input":"apple"}))

# # parse = StrOutputParser()
# # chain = prompt_template | model | parse
# chain = prompt_template | model | StrOutputParser()

# response = chain.invoke({"sys_input":"你是一个翻译助手，翻译下面的英文单词或句子,使用json格式返回","user_input":"apple"})
# print(response)








# import asyncio
# from langchain_core.output_parsers import StrOutputParser
# from langchain_core.prompts import ChatPromptTemplate

# prompt = ChatPromptTemplate.from_template("给我讲一个关于{topic}的笑话")
# parser = StrOutputParser()
# chain = prompt | model | parser

# for chunk in chain.stream({"topic":"小明"}):
#     print(chunk,end="",flush=True)   # 如果不加 flush=True不会一个字一个字的打

# # async def async_stream():
# #     async for chunk in chain.astream({"topic": "鹦鹉"}):
# #         print(chunk, end="|", flush=True)

# # # 运行异步流处理
# # asyncio.run(async_stream())    





# import asyncio
# from langchain_core.output_parsers import JsonOutputParser
# from langchain_core.output_parsers import StrOutputParser
# from langchain_openai import ChatOpenAI


# chain = (
#         model | JsonOutputParser()
#     # 由于Langchain旧版本中的一个错误，JsonOutputParser未能从某些模型中流式传输结果
# )


# async def async_stream():
#     async for text in chain.astream(
#             "以JSON 格式输出法国、西班牙和日本的国家及其人口列表。"
#             '使用一个带有“countries”外部键的字典，其中包含国家列表。'
#             "每个国家都应该有键`name`和`population`"
#     ):
#         print(text, flush=True)


# # 运行异步流处理
# asyncio.run(async_stream())




# -------------------------------封装 可执行 调用-----------------------------
# import asyncio
# from langchain_core.runnables import RunnableLambda
# from langchain_core.tools import tool



# def reverse_word(word: str):
#     return word[::-1]

# reverse_word = RunnableLambda(reverse_word)

# @tool
# def bad_tool(word: str):
#     """不传播回调的自定义工具。"""
#     return reverse_word.invoke(word)

# for event in bad_tool.stream("hello", version="v2"):
#     print("result---> ", event)





# # 异步流处理
# async def async_stream():
#     def reverse_word(word: str):
#         return word[::-1]

#     reverse_word = RunnableLambda(reverse_word)

#     @tool
#     def bad_tool(word: str):
#         """不传播回调的自定义工具。"""
#         return reverse_word.invoke(word)

#     async for event in bad_tool.astream_events("hello", version="v2"):
#         print("result---> ", event)


# # 运行异步流处理
# asyncio.run(async_stream())
# # """
# # result--->  {'event': 'on_tool_start', 'data': {'input': 'hello'}, 'name': 'bad_tool', 'tags': [], 'run_id': 'ef0738d3-9e78-4ba1-be6f-faf077fc026c', 'metadata': {}, 'parent_ids': []}

# # result--->  {'event': 'on_chain_start', 'data': {'input': 'hello'}, 'name': 'reverse_word', 'tags': [], 'run_id': 'bad2b9ec-417c-4d04-9cd6-1a2f58b93641', 'metadata': {}, 'parent_ids': ['ef0738d3-9e78-4ba1-be6f-faf077fc026c']}

# # result--->  {'event': 'on_chain_end', 'data': {'output': 'olleh', 'input': 'hello'}, 'run_id': 'bad2b9ec-417c-4d04-9cd6-1a2f58b93641', 'name': 'reverse_word', 'tags': [], 'metadata': {}, 'parent_ids': ['ef0738d3-9e78-4ba1-be6f-faf077fc026c']}
# # result--->  {'event': 'on_tool_end', 'data': {'output': 'olleh'}, 'run_id': 'ef0738d3-9e78-4ba1-be6f-faf077fc026c', 'name': 'bad_tool', 'tags': [], 'metadata': {}, 'parent_ids': []}
# # """







# import os, time
# from langchain.globals import set_llm_cache
# from langchain_core.caches import InMemoryCache
# from langchain_community.cache import SQLiteCache

# # 创建LLM实例
# set_llm_cache(InMemoryCache())
# # set_llm_cache(SQLiteCache(database_path=".langchain.db"))

# def measure_invoke_time(llm, prompt):
#     # 记录开始时间
#     start_wall_time = time.time()
#     start_cpu_times = os.times()

#     # 调用LLM
#     response = llm.invoke(prompt)

#     # 记录结束时间
#     end_wall_time = time.time()
#     end_cpu_times = os.times()

#     # 计算经过的时间
#     wall_time = end_wall_time - start_wall_time
#     user_time = end_cpu_times.user - start_cpu_times.user
#     sys_time = end_cpu_times.system - start_cpu_times.system
#     total_cpu_time = user_time + sys_time

#     return response, wall_time, user_time, sys_time, total_cpu_time

# # 第一次调用
# response1, wall_time1, user_time1, sys_time1, total_cpu_time1 = measure_invoke_time(model, "给我讲个笑话")
# print("First call response:", response1)
# print(f"First call CPU times: user {user_time1 * 1000:.0f} ms, sys: {sys_time1 * 1000:.0f} ms, total: {total_cpu_time1 * 1000:.0f} ms")
# print(f"First call Wall time: {wall_time1 * 1000:.0f} ms")

# # 第二次调用
# response2, wall_time2, user_time2, sys_time2, total_cpu_time2 = measure_invoke_time(model, "给我讲个笑话")

# print("Second call response:", response2)
# print(f"Second call CPU times: user {user_time2 * 1000:.0f} ms, sys: {sys_time2 * 1000:.0f} ms, total: {total_cpu_time2 * 1000:.0f} ms")
# print(f"Second call Wall time: {wall_time2 * 1000:.0f} ms")










# from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
# from langchain_community.chat_message_histories import ChatMessageHistory
# from langchain_core.chat_history import BaseChatMessageHistory
# from langchain_core.runnables.history import RunnableWithMessageHistory
# from langchain_core.runnables import ConfigurableFieldSpec

# prompt = ChatPromptTemplate.from_messages(
#     [
#         (
#             "system",
#             "You're an assistant who's good at {ability}. Respond in 20 words or fewer",
#         ),
#         MessagesPlaceholder(variable_name="history"),
#         ("human", "{input}"),
#     ]
# )
# runnable = prompt | model
# store = {}


# def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory:
#     if (user_id, conversation_id) not in store:
#         store[(user_id, conversation_id)] = ChatMessageHistory()
#     return store[(user_id, conversation_id)]


# with_message_history = RunnableWithMessageHistory(
#     runnable,
#     get_session_history,
#     input_messages_key="input",
#     history_messages_key="history",
#     history_factory_config=[
#         ConfigurableFieldSpec(
#             id="user_id",
#             annotation=str,
#             name="User ID",
#             description="用户的唯一标识符。",
#             default="",
#             is_shared=True,
#         ),
#         ConfigurableFieldSpec(
#             id="conversation_id",
#             annotation=str,
#             name="Conversation ID",
#             description="对话的唯一标识符。",
#             default="",
#             is_shared=True,
#         ),
#     ],
# )

# response = with_message_history.invoke(
#     {"ability": "math", "input": "余弦是什么意思？"},
#     config={"configurable": {"user_id": "123", "conversation_id": "1"}},
# )
# print(response)
# #content='余弦是一个三角函数，它表示直角三角形的邻边长度和斜边长度的比值。' response_metadata={'token_usage': {'completion_tokens': 33, 'prompt_tokens': 38, 'total_tokens': 71}, 'model_name': 'gpt-4-0613', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None} id='run-2d1eba02-4709-4db5-ab6b-0fd03ab4c68a-0' usage_metadata={'input_tokens': 38, 'output_tokens': 33, 'total_tokens': 71}


# # 记住
# response = with_message_history.invoke(
#     {"ability": "math", "input": "什么?"},
#     config={"configurable": {"user_id": "123", "conversation_id": "1"}},
# )
# print(response)
# #content='余弦是一个数学术语，代表在一个角度下的邻边和斜边的比例。' response_metadata={'token_usage': {'completion_tokens': 32, 'prompt_tokens': 83, 'total_tokens': 115}, 'model_name': 'gpt-4-0613', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None} id='run-99368d03-c2ed-4dda-a32f-677c036ad676-0' usage_metadata={'input_tokens': 83, 'output_tokens': 32, 'total_tokens': 115}


# # 新的 user_id --> 不记得了。
# response = with_message_history.invoke(
#     {"ability": "math", "input": "什么?"},
#     config={"configurable": {"user_id": "123", "conversation_id": "2"}},
# )
# print(response)
# #content='对不起，我没明白您的问题。你能更明确地表达你的数学问题吗？' response_metadata={'token_usage': {'completion_tokens': 29, 'prompt_tokens': 32, 'total_tokens': 61}, 'model_name': 'gpt-4-0613', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None} id='run-48ff0adf-8f7d-48bc-a137-680c31d6e6ab-0' usage_metadata={'input_tokens': 32, 'output_tokens': 29, 'total_tokens': 61}


# from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings

# def res_embding():
#     embedding = HuggingFaceEmbeddings(model_name='moka-ai/m3e-base')
#     if embedding is None:
#         return "无法加载embedding模型"
#     return embedding

# from langchain_community.tools.tavily_search import TavilySearchResults
# from langchain.tools.retriever import create_retriever_tool
# from langchain.document_loaders import WebBaseLoader
# # from langchain_community.vectorstores import FAISS
# # from langchain_community.embeddings import OpenAIEmbeddings #DashScopeEmbeddings  # 阿里云
# from langchain.vectorstores import Chroma
# from langchain_text_splitters import RecursiveCharacterTextSplitter
# from langchain import hub

# # model = ChatOpenAI(model="gpt-4")
# loader = WebBaseLoader("https://zh.wikipedia.org/wiki/%E7%8C%AB")
# docs = loader.load()
# # print(docs)
# document = RecursiveCharacterTextSplitter(
#     chunk_size= 4000,chunk_overlap=200
# ).split_documents(docs)
# # print(document)
# # vector = FAISS.from_documents(document, OpenAIEmbeddings())
# # recver = vector.as_retriever()

# embedding = res_embding()
# vector = Chroma.from_documents(documents=document, embedding=embedding)
# retriever = vector.as_retriever()


# recver_tool = create_retriever_tool(
#     retriever,"wiki_search","搜索维基百科",
# )

# searcher = TavilySearchResults(max_results=1)
# tools = [searcher, recver_tool]

# prompt = hub.pull("hwchase17/openai-functions-agent")
# print(prompt.messages)

# from langchain.agents import create_tool_calling_agent
# agent = create_tool_calling_agent(model, tools, prompt)

# from langchain.agents import AgentExecutor
# agent_executor = AgentExecutor(agent=agent, tools=tools)




# from langchain_community.tools.tavily_search import TavilySearchResults
# # pip install langchain
# from langchain.tools.retriever import create_retriever_tool
# from langchain_community.document_loaders import WebBaseLoader
# # pip install faiss-cpu
# # from langchain_community.vectorstores import FAISS
# # from langchain_openai import OpenAIEmbeddings
# from langchain.vectorstores import Chroma
# from langchain_text_splitters import RecursiveCharacterTextSplitter

# search = TavilySearchResults(max_results=1)

# loader = WebBaseLoader("https://zh.wikipedia.org/wiki/%E7%8C%AB")
# docs = loader.load()
# documents = RecursiveCharacterTextSplitter(
#     chunk_size=1000, chunk_overlap=200
# ).split_documents(docs)

# embedding = res_embding()
# vector = Chroma.from_documents(documents=documents, embedding=embedding)
# retriever = vector.as_retriever()
# # vector = FAISS.from_documents(documents, HuggingFaceEmbeddings("moka-ai/m3e-base"))
# # retriever = vector.as_retriever()

# retriever_tool = create_retriever_tool(
#     retriever,
#     "wiki_search",
#     "搜索维基百科",
# )


# from langchain_core.messages import HumanMessage

# tools = [search, retriever_tool]
# model_with_tools = model.bind_tools(tools)


# response = model_with_tools.invoke([HumanMessage(content="你好")])
# print(f"ContentString: {response.content}")
# print(f"ToolCalls: {response.tool_calls}")


# print("------------------------------------------------------------------------------")

# response = model_with_tools.invoke([HumanMessage(content="今天上海天气怎么样")])
# print(f"ContentString: {response.content}")
# print(f"ToolCalls: {response.tool_calls}")






# from langchain_community.tools.tavily_search import TavilySearchResults

# search = TavilySearchResults(max_results=1)

# # pip install langchain
# from langchain.tools.retriever import create_retriever_tool
# from langchain_community.document_loaders import WebBaseLoader
# from langchain.vectorstores import Chroma
# # pip install faiss-cpu
# from langchain_text_splitters import RecursiveCharacterTextSplitter

# loader = WebBaseLoader("https://zh.wikipedia.org/wiki/%E7%8C%AB")
# docs = loader.load()
# documents = RecursiveCharacterTextSplitter(
#     chunk_size=1000, chunk_overlap=200
# ).split_documents(docs)

# vector = Chroma.from_documents(documents, res_embding())
# retriever = vector.as_retriever()

# retriever_tool = create_retriever_tool(
#     retriever,
#     "wiki_search",
#     "搜索维基百科",
# )


# tools = [search, retriever_tool]

# from langchain import hub
# # 获取要使用的提示 - 您可以修改这个！
# prompt = hub.pull("hwchase17/openai-functions-agent")

# from langchain.agents import create_tool_calling_agent
# agent = create_tool_calling_agent(model, tools, prompt)

# from langchain.agents import AgentExecutor
# agent_executor = AgentExecutor(agent=agent, tools=tools)

# #print(agent_executor.invoke({"input": "你好，我的名字是Cyber", "chat_history": []}))
# from langchain_core.messages import AIMessage, HumanMessage

# response = agent_executor.invoke(
#     {
#         "chat_history": [
#             HumanMessage(content="Hi，我的名字是Cyber"),
#             AIMessage(content="你好，Cyber，很高兴见到你！有什么我可以帮助你的吗？"),
#         ],
#         "input": "我的名字是什么?",
#     }
# )
# print(response)











# # pip install langchain
# from langchain.tools.retriever import create_retriever_tool
# from langchain_community.document_loaders import WebBaseLoader
# #pip install faiss-cpu
# from langchain_community.vectorstores import FAISS
# from langchain.vectorstores import Chroma
# from langchain_openai import OpenAIEmbeddings
# from langchain_text_splitters import RecursiveCharacterTextSplitter

# loader = WebBaseLoader("https://zh.wikipedia.org/wiki/%E7%8C%AB")
# docs = loader.load()
# documents = RecursiveCharacterTextSplitter(
#     # chunk_size 参数在 RecursiveCharacterTextSplitter 中用于指定每个文档块的最大字符数。它的作用主要有以下几个方面：
#     # chunk_overlap 参数用于指定每个文档块之间的重叠字符数。这意味着，当文档被拆分成较小的块时，每个块的末尾部分会与下一个块的开头部分有一定数量的重叠字符。
#     # 第一个块包含字符 1 到 1000。第二个块包含字符 801 到 1800。第三个块包含字符 1601 到 2600。
#     chunk_size=1000, chunk_overlap=200
# ).split_documents(docs)


# vector = Chroma.from_documents(documents, res_embding())
# retriever = vector.as_retriever()

# print(retriever.invoke("猫的特征")[0])

# retriever_tool = create_retriever_tool(
#     retriever,
#     "wiki_search",
#     "搜索维基百科",
# )









# from langchain_community.tools.tavily_search import TavilySearchResults

# search = TavilySearchResults(max_results=2)
# print(search.invoke("今天北京天气怎么样"))









# import base64
# import httpx
# from langchain_core.messages import HumanMessage

# image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
# image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
# message = HumanMessage(
#     content=[
#         {"type": "text", "text": "用中文描述这张图片中的天气"},
#         {"type": "image_url", "image_url": {"url": image_url}},
#     ],
# )
# response = model.invoke([message])
# print(response.content)

# #response = model.invoke([message])
# #print(response.content)




# # pip install -qU langchain langchain-openai
# from langchain_core.output_parsers import JsonOutputParser
# from langchain_core.prompts import PromptTemplate
# #langchain v0.2 使用以下引用pydantic
# from langchain_core.pydantic_v1 import BaseModel, Field
# #langchain v0.3 使用以下引用pydantic
# #from pydantic import BaseModel, Field


# # 定义您期望的数据结构。
# class Joke(BaseModel):
#     AAA: str = Field(description="设置笑话的问题")
#     BBB: str = Field(description="解决笑话的答案")
#     # setup: str = Field(description="设置笑话的问题")
#     # punchline: str = Field(description="解决笑话的答案")


# # 还有一个用于提示语言模型填充数据结构的查询意图。
# joke_query = "告诉我一个笑话。"
# # 设置解析器 + 将指令注入提示模板。
# parser = JsonOutputParser(pydantic_object=Joke)
# prompt = PromptTemplate(
#     template="回答用户的查询。\n{format_instructions}\n{query}\n",
#     input_variables=["query"],
#     partial_variables={"format_instructions": parser.get_format_instructions()},
# )
# print("-----1------->  ",parser.get_format_instructions(),"\n\n\n\n\n\n")
# print("-----2------->  ",prompt,"\n\n\n\n\n\n")
# chain = prompt | model | parser
# response = chain.invoke({"query": joke_query})
# print(response)










# # pip install -qU langchain langchain-openai
# from langchain_core.output_parsers import JsonOutputParser
# from langchain_core.prompts import PromptTemplate


# joke_query = "告诉我一个笑话。"
# parser = JsonOutputParser()
# prompt = PromptTemplate(
#     template="回答用户的查询。\n{format_instructions}\n{query}\n",
#     input_variables=["query"],
#     partial_variables={"format_instructions": parser.get_format_instructions()},
# )
# chain = prompt | model | parser
# response = chain.invoke({"query": joke_query})
# print(response)











# from langchain_core.tools import StructuredTool
# import asyncio

# def multiply(a: int, b: int) -> int:
#     """Multiply two numbers."""
#     return a * b

# async def amultiply(a: int, b: int) -> int:
#     """Multiply two numbers."""
#     return a * b

# async def main():
#     # func 参数：指定一个同步函数。当你在同步上下文中调用工具时，它会使用这个同步函数来执行操作。
#     # coroutine 参数：指定一个异步函数。当你在异步上下文中调用工具时，它会使用这个异步函数来执行操作。
#     calculator = StructuredTool.from_function(func=multiply, coroutine=amultiply)
#     print(calculator.invoke({"a": 2, "b": 3}))
#     print(await calculator.ainvoke({"a": 2, "b": 5}))

# # 运行异步主函数
# asyncio.run(main())



# from langchain_core.tools import StructuredTool
# from pydantic import BaseModel, Field
# import asyncio

# class CalculatorInput(BaseModel):
#     a: int = Field(description="first number")
#     b: int = Field(description="second number")

# def multiply(a: int, b: int) -> int:
#     """Multiply two numbers."""
#     return a * b

# # 创建一个异步包装器函数
# async def async_addition(a: int, b: int) -> int:
#     """Multiply two numbers."""
#     return a + b
    
# async def main():
#     calculator = StructuredTool.from_function(
#         func=multiply,
#         name="Calculator",
#         description="multiply numbers",
#         args_schema=CalculatorInput,
#         return_direct=True,
#         #coroutine= async_addition
#         # coroutine= ... <- 如果需要，也可以指定异步方法
#     )
#     print(calculator.invoke({"a": 2, "b": 3}))
#     #print(await calculator.ainvoke({"a": 2, "b": 5}))
#     print(calculator.name)
#     print(calculator.description)
#     print(calculator.args)

# # 运行异步主函数
# asyncio.run(main())



# from langchain_core.tools import tool

# @tool
# async def amultiply(a: int, b: int) -> int:
#     """Multiply two numbers."""
#     return a * b

# # 让我们检查与该工具关联的一些属性。
# print(amultiply.name)
# print(amultiply.description)
# print(amultiply.args)


# from pydantic import BaseModel, Field
# from langchain_core.tools import tool
# class CalculatorInput(BaseModel):
#     a: int = Field(description="first number")
#     b: int = Field(description="second number")

# @tool("multiplication-tool", args_schema=CalculatorInput, return_direct=True)
# def multiply(a: int, b: int) -> int:
#     """Multiply two numbers."""
#     return a * b

# # 让我们检查与该工具关联的一些属性。
# print(multiply.name)
# print(multiply.description)
# print(multiply.args)
# print(multiply.return_direct)




# from langchain_core.tools import StructuredTool
# from langchain_core.tools import ToolException

# def get_weather(city: str) -> int:
#     """获取给定城市的天气。"""
#     raise ToolException(f"错误：没有名为{city}的城市。")

# def _handle_error(error: ToolException) -> str:
#     return f"工具执行期间发生以下错误：`{error.args[0]}`"

# get_weather_tool = StructuredTool.from_function(
#     func=get_weather,
#     handle_tool_error=True,
#     # handle_tool_error="没找到这个城市",
#     # handle_tool_error=_handle_error
# )

# response = get_weather_tool.invoke({"city": "foobar"})
# print(response)




# from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
# from langchain_community.utilities import SQLDatabase
# from langchain_community.agent_toolkits.sql.base import create_sql_agent
# from langchain.agents.agent_types import AgentType

# db = SQLDatabase.from_uri("sqlite:///langchain.db")
# toolkit = SQLDatabaseToolkit(db=db, llm=model)
# print("--------->  ", toolkit.get_tools())

# agent_executor = create_sql_agent(
#     llm=model,
#     toolkit=toolkit,
#     verbose=False,
#     agent_type=AgentType.OPENAI_FUNCTIONS
# )
# result = agent_executor.invoke("Describe the full_llm_cache table")
# print("---result---->  ", result)



# from langchain_community.tools import WikipediaQueryRun
# from langchain_community.utilities import WikipediaAPIWrapper
# from pydantic import BaseModel, Field

# api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
# class WikiInputs(BaseModel):
#     """维基百科工具的输入。"""
#     query: str = Field(
#         description="query to look up in Wikipedia, should be 3 or less words"
#     )
# tool = WikipediaQueryRun(
#     name="wiki-tool",
#     description="look up things in wikipedia",
#     args_schema=WikiInputs,
#     api_wrapper=api_wrapper,
#     #如果 return_direct 设置为 True，工具会直接返回查询结果，例如一个字符串或一个简单的数据结构。
#     #如果 return_direct 设置为 False，工具可能会返回一个更复杂的响应对象，其中包含更多的元数据或结构化信息。
#     return_direct=True,
# )

# print(tool.run("langchain"))
# print(f"Name: {tool.name}")
# print(f"Description: {tool.description}")
# print(f"args schema: {tool.args}")
# print(f"returns directly?: {tool.return_direct}")

# Page: LangChain
# Summary: LangChain is a software framework that helps facilitate the integration of
# Name: wiki-tool
# Description: look up things in wikipedia
# args schema: {'query': {'description': 'query to look up in Wikipedia, should be 3 or less words', 'title': 'Query', 'type': 'string'}}
# returns directly?: True



# # pip install -qU wikipedia
# from langchain_community.tools import WikipediaQueryRun
# from langchain_community.utilities import WikipediaAPIWrapper

# api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
# tool = WikipediaQueryRun(api_wrapper=api_wrapper)
# print(tool.invoke({"query": "langchain"}))

# print(f"Name: {tool.name}")
# print(f"Description: {tool.description}")
# print(f"args schema: {tool.args}")
# print(f"returns directly?: {tool.return_direct}")

















from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings

def res_embding():
    """Load and return the HuggingFace 'moka-ai/m3e-base' embedding model.

    Returns:
        HuggingFaceEmbeddings: the initialized embedding model.

    Raises:
        Exception: propagated from HuggingFaceEmbeddings if the model
            cannot be loaded (e.g. missing weights or network failure).

    Note: the original ``if embedding is None`` check was dead code — a
    Python constructor either returns an instance or raises, never None —
    and returning an error *string* would have broken callers that expect
    an embedding object. Failures now surface as exceptions.
    """
    return HuggingFaceEmbeddings(model_name='moka-ai/m3e-base')


embedding_model = res_embding()

# Sample sentences to embed; mixes Chinese and English.
_sample_texts = [
    "嗨！",
    "哦，你好！",
    "你叫什么名字？",
    "我的朋友们叫我World",
    "Hello World！",
]

embeddings = embedding_model.embed_documents(_sample_texts)

# First value printed: how many texts were embedded.
# Second value: dimensionality of the first text's embedding vector.
print(len(embeddings), len(embeddings[0]))
