import langsmith
from langchain.chat_models import init_chat_model
import os
import json

from langchain_core.chat_history import BaseChatMessageHistory, InMemoryChatMessageHistory
from langchain_core.example_selectors import LengthBasedExampleSelector, length_based,SemanticSimilarityExampleSelector
from langchain_core.messages import SystemMessage, HumanMessage, BaseMessage, AIMessage, trim_messages, filter_messages
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate, MessagesPlaceholder, \
    FewShotChatMessagePromptTemplate, FewShotPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnableWithMessageHistory
from langchain_core.tools import tool
from langchain_core.utils.function_calling import tool_example_to_messages
from pydantic import BaseModel, Field
from langchain_core.tools import StructuredTool
from typing import Tuple, Iterator, List, Optional
from typing import TypedDict, Annotated
from datetime import datetime
from langchain_tavily import TavilySearch
from langsmith import uuid7
import asyncio
import warnings
# Filter the LangSmith UUID v7 warning precisely by message text instead of
# silencing a whole warning category.
warnings.filterwarnings("ignore", message="LangSmith now uses UUID v7")
# Base chat model. config_prefix + configurable_fields make model/provider
# overridable per-invoke via config={"configurable": {"first_model": ...,
# "first_model_provider": ...}} (see the commented example below).
model = init_chat_model(
    model='deepseek-chat',
    model_provider='deepseek',
    api_key=os.getenv('DEEPSEEK_API_KEY'),  # NOTE(review): None if the env var is unset — confirm fallback behavior
    config_prefix='first',
    configurable_fields=['model', 'model_provider'],
    temperature=0.3
)
# messages = [
#     SystemMessage(content='你是编译原理专家，只回答编译原理相关问题以及所衍生的问题，除此之外拒绝回答'),
#     HumanMessage(content='求积分的方法')
# ]
# parser = StrOutputParser()
# chain = model | parser
# res = chain.invoke(
#     messages,
#     config={
#         "configurable": {
#             "first_model": 'deepseek-chat',
#             "first_model_provider": 'deepseek'
#         }
#     }
# )


class WeatherInput(BaseModel):
    """Query the weather."""
    # NOTE(review): used as args_schema by the StructuredTool below, so the
    # docstring and Field descriptions presumably end up in the tool schema
    # shown to the model — confirm before renaming.
    location: str = Field(description="城市名称")  # city name
    date: datetime = Field(description="查询日期")  # date to query


def get_weather(location: str, date: datetime) -> Tuple[str, str]:
    """Return two fake weather readings for a city and date.

    Args:
        location: City name.
        date: Date being queried.

    Returns:
        A pair of weather strings; with response_format
        "content_and_artifact" the first element becomes the tool message
        content and the second its artifact.
    """
    first_reading = f"1.在{location}，{date}，温度为12摄氏度"
    second_reading = f"2.在{location}，{date}，温度为13摄氏度"
    return first_reading, second_reading


# Rebind the plain function as a LangChain structured tool. With
# response_format="content_and_artifact" the tool splits the function's
# 2-tuple into message content and artifact.
get_weather = StructuredTool.from_function(
    func=get_weather,
    args_schema=WeatherInput,
    description="查询天气",
    response_format="content_and_artifact",
)


class MulInput(BaseModel):
    """Multiplication."""
    # NOTE(review): used as args_schema by the StructuredTool below; the
    # Field descriptions presumably feed the generated tool schema — confirm.
    a: int = Field(description="第一个数")  # first operand
    b: int = Field(description="第二个数")  # second operand


def mul(a: int, b: int) -> Tuple[int, bool]:
    """Multiply two integers.

    Args:
        a: First operand.
        b: Second operand.

    Returns:
        A (content, artifact) pair: the product, and whether the product
        is nonzero.
    """
    product = a * b
    return product, product != 0


# Rebind the multiplication function as a structured tool; the artifact
# (second tuple element) records whether the product was nonzero.
mul = StructuredTool.from_function(
    func=mul,
    args_schema=MulInput,
    description="乘法",
    response_format="content_and_artifact",
)


# Convert a message to a dict first, then to JSON
def message_to_dict(message):
    """Convert a LangChain message object into a JSON-serializable dict.

    Attributes are probed with hasattr/getattr, so any message-like object
    works (HumanMessage, AIMessage, ToolMessage, ...). Only populated
    fields are emitted, keeping the result compact for json.dumps.

    Args:
        message: Any message-like object.

    Returns:
        dict: Mapping of the message's populated fields. "type" holds the
        concrete class name; "content" is kept whenever it is non-None
        (even empty strings); the remaining optional fields are included
        only when truthy.
    """
    result = {}

    # The concrete class name is more informative than the `type` field value.
    if hasattr(message, 'type'):
        result["type"] = type(message).__name__
    # Keep falsy-but-present content (e.g. ""), drop only missing/None.
    if getattr(message, 'content', None) is not None:
        result["content"] = message.content

    # Optional fields share one rule — present and truthy — so handle them
    # uniformly instead of repeating the hasattr/truthy check per field.
    optional_fields = (
        "additional_kwargs",
        "response_metadata",
        "id",
        "tool_calls",
        "usage_metadata",
        "artifact",
    )
    for field_name in optional_fields:
        value = getattr(message, field_name, None)
        if value:
            result[field_name] = value
    return result


# tavily_search = TavilySearch(
#     max_results=10
# )
# model_with_tools = model.bind_tools([get_weather, mul, tavily_search], tool_choice="any")
# # print(os.getenv("TAVILY_API_KEY"))
#
#
# class Agree(BaseModel):
#     """支持率"""
#     male: int = Field(description="男性支持率")
#     female: int = Field(description="女性支持率")
#
#
# class Agree(TypedDict):
#     """支持率"""
#     male: Annotated[int, "男性支持率"]
#     female: Annotated[int, "女性支持率"]
#
#
# messages = [
#     HumanMessage(content="高市早苗在日本支持率如何")
# ]
#
# ai_msg = model_with_tools.invoke(
#     messages
# )
# messages.append(ai_msg)
# for tool_call in ai_msg.tool_calls:
#     selected_tool = {"mul": mul, "get_weather": get_weather, "tavily_search": tavily_search}[tool_call.get("name").lower()]
#     tool_msg = selected_tool.invoke(tool_call)
#     messages.append(tool_msg)
#
#
#
# model_with_structured = model.with_structured_output(
#     Agree,
#     include_raw=True
# )
# model_with_structured = model.with_structured_output(
#     Agree,
#     include_raw=True
# )
# messages.append(
#     HumanMessage(content="请根据工具调用的结果，返回支持率")
# )
# res_msg = json.dumps(message_to_dict(model_with_structured.invoke(messages)), indent=2, ensure_ascii=False)
# prompt = """
#     请根据工具调用的结果，返回支持率
#     工具调用结果：{tool_calls}
#
# """
# res_msg = model_with_structured.invoke(messages)
# print(json.dumps(message_to_dict(res_msg['raw']), indent=2, ensure_ascii=False))
# print("\n")
# print(res_msg['parsed'])
# print('\n')
# print(res_msg['parsing_error'])
# print('\n')
# print(res_msg)

# chunks = []
# messages = [
#     HumanMessage(content='讲个100字的笑话，并附带上笑点解析')
# ]
#
#
# async def async_stream():
#     async for chunk in model.astream(messages):
#         chunks.append(chunk)
#         print(chunk.content, end='|')
#
#
# async def main():
#     task = asyncio.create_task(async_stream())
#     await task
#
#
# asyncio.run(main())
# print("执行完毕")


# async def func():
#     await asyncio.sleep(5)
#     print("test")
# async def func2():
#     await asyncio.sleep(5)
#     print("test2")
# async def main():
#     task1 = asyncio.create_task(func())
#     task2 = asyncio.create_task(func2())
#     await task1
#     await task2
# asyncio.run(main())
# print("执行完毕")


# def split_into_list(text: Iterator[str]) -> Iterator[str]:
#     sentence = ""
#     for chunk in text:
#         for c in chunk:
#             if c == "。":
#                 sentence += c
#                 if sentence != "。":
#                     yield sentence
#                 sentence = ""
#             else:
#                 sentence += c
#     if sentence:
#         yield sentence + "。"

#
# messages = [
#     HumanMessage(content="我是roboko")
# ]

# parser = StrOutputParser()
# chain = model | parser
# store = {}
# def get_session_history(session_id: str) -> BaseChatMessageHistory:
#     if session_id not in store:
#         store[session_id] = InMemoryChatMessageHistory()
#     return store[session_id]
#
# chain_with_history = RunnableWithMessageHistory(chain, get_session_history)
# run_id = str(uuid7())
# config = {
#     "configurable": {
#         "run_id": run_id,
#         "session_id": "1"
#     }
# }
# ai_msg = chain_with_history.invoke(
#     messages,
#     config=config
# )
#
# print(ai_msg)
#
# ai_msg = chain_with_history.invoke(
#     [HumanMessage(content="我是谁")],
#     config=config
# )
# print(ai_msg)


# messages = [
#     SystemMessage(content="you're a good assistant", id='1'),
#     HumanMessage(content="hi! I'm bob", id='2'),
#     AIMessage(content="hi!", id='3'),
#     HumanMessage(content="I like vanilla ice cream", id='4'),
#     AIMessage(content="nice", id='5'),
#     HumanMessage(content="whats 2 + 2", id='6'),
#     AIMessage(content="4", id='7'),
#     HumanMessage(content="thanks", id='8'),
#     AIMessage(content="no problem!", id='9'),
#     HumanMessage(content="having fun?", id='10'),
#     AIMessage(content="yes!", id='11'),
#     HumanMessage(content="What's my name?", id='12'),
# ]
#
# human_filter = filter_messages(
#     include_types=[HumanMessage, AIMessage],
#     exclude_ids=['2']
# )
#
# message = human_filter.invoke(messages)
# for msg in message:
#     msg.pretty_print()


# prompt_template = ChatPromptTemplate(
#     [
#         ("system", "把以下内容翻译成{language}, 只返回翻译结果，不要返回任何其他内容"),
#         ("human", "{text}")
#         # MessagesPlaceholder("msgs")
#     ]
# )
# # print(type(prompt_template))
# parser = StrOutputParser()
# # chain = prompt_template | model | parser
#
# res = prompt_template.invoke(
#     {
#         "language": "俄语",
#         "text": "你是谁",
#         # "msgs": [
#         #     HumanMessage(content="你是一个助手"),
#         #     AIMessage(content="你是一个助手"),
#         # ]
#     }
# )
# # print(type(res))
# ai_msg = model.invoke(res)
# print(ai_msg.content)
# print(type(prompt_template.invoke(
#     {
#         "language": "Chinese",
#         "text": "Who are you?"
#     })
# ))
# print(res)

# prompt_template = PromptTemplate(
#     input_variables=["language", "text"],
#     template="翻译{language}: {text}"
# )
# print(type(prompt_template.invoke(
#     {
#         "language": "Chinese",
#         "text": "Who are you?"
#     }
# )))


# chat_prompt_template = ChatPromptTemplate(
#     [
#         ("human", "{input}"),
#         ("ai", "{text}")
#     ]
# )
# few_shot_prompt = FewShotChatMessagePromptTemplate(
#     example_prompt=chat_prompt_template,
#     examples=[
#         {
#             "input": "1。2",
#             "text": "3"
#         },
#         {
#             "input": "2。3",
#             "text": "5"
#         },
#     ]
# )
# chat_prompt_template = ChatPromptTemplate(
#     [
#         ("system", "你是一个数学助手"),
#         few_shot_prompt,
#         ("human", "{input}"),
#     ]
# )
# chain = chat_prompt_template | model
# chain.invoke(
#     {
#         "input": "12。12"
#     }
# ).pretty_print()


# example_prompt = PromptTemplate(
#     input_variables=["question", "answer"],
#     template="Question: {question}\n{answer}"
# )
# example_prompt = ChatPromptTemplate(
#     [
#         ("human", "{question}"),
#         ("ai", "{answer}")
#     ]
# )
# examples = [
#     {
#         "question": "李⽩和杜甫，谁更⻓寿？",
#         "answer": """
#         是否需要后续问题：是的。
#         后续问题：李⽩享年多少岁？
#         中间答案：李⽩享年61岁。
#         后续问题：杜甫享年多少岁？
#         中间答案：杜甫享年58岁。
#         所以最终答案是：李⽩
#         """
#     },
#     {
#         "question": "中国和美国，哪个更⻓？",
#         "answer": """
#         是否需要后续问题：是的。
#         后续问题：中国的国土面积是多少？
#         中间答案：中国的国土面积是964万平方公里。
#         后续问题：美国的国土面积是多少？
#         中间答案：美国的国土面积是383万平方公里。
#         所以最终答案是：中国
#         """
#     }
# ]

# few_shot_prompt = FewShotPromptTemplate(
#     example_prompt=example_prompt,
#     examples=examples,
#     suffix="Question: {input}",
#     input_variables=["input"]
# )
# few_shot_prompt = FewShotChatMessagePromptTemplate(
#     example_prompt=example_prompt,
#     examples=examples
# )
# chain = few_shot_prompt | model | StrOutputParser()
# res = few_shot_prompt.invoke(
#     {
#         "input": "电影《红⾼粱》和《霸王别姬》的导演来⾃同⼀个国家吗，按照格式输出"
#     }
# )

# res = chain.invoke(
#     {
#         "input": "电影《红⾼粱》和《霸王别姬》的导演来⾃同⼀个国家吗，按照格式输出"
#     }
# )
# print(res)
# res = chain.invoke(
#     {
#         "input": "电影《红⾼粱》和《霸王别姬》的导演来⾃同⼀个国家吗，按照格式输出"
#     }
# )

# messages = few_shot_prompt.invoke({}).to_messages()
# print(messages)
# messages.append(HumanMessage(content="电影《红⾼粱》和《霸王别姬》的导演来⾃同⼀个国家吗, 按照格式输出"))
# chain = model | StrOutputParser()
# res = chain.invoke(messages)
# print(res)
#
# class Person(BaseModel):
#     """人的信息"""
#     name: Optional[str] = Field(default=None, description="人的姓名")
#     hair_color: Optional[str] = Field(default=None, description="人的头发颜色")
#     skin_color: Optional[str] = Field(default=None, description="人的肤色")
#     height_in_meters: Optional[float] = Field(default=None, description="人的身高，单位为米")
#
#
# class Data(BaseModel):
#     """提取人信息"""
#     people: List[Person]
#
#
# examples = [
#     (
#         "海洋是⼴阔⽽蓝⾊的。它有两万多英尺深。",
#         Data(people=[])
#     ),
#     (
#         "⼩强从中国远⾏到美国。他喜欢小美。",
#         Data(people=[Person(name="⼩强"), Person(name="小美")])
#     )
# ]


# example_msgs = []
# for txt, tool_call in examples:
#     ai_msg: str
#     if tool_call.people:
#         ai_msg = f"提取到的人信息为：{tool_call.people}"
#     else:
#         ai_msg = "未提取到人信息"
#     example_msgs.extend(
#         tool_example_to_messages(
#             input=txt,
#             tool_calls=[tool_call],
#             ai_response=ai_msg
#         )
#     )

# prompt_template = ChatPromptTemplate(
#     [
#         ("system", "你是一个提取人信息的助手"),
#         MessagesPlaceholder("examples"),
#         ("human", "{input}")
#     ]
# )
# for msg in example_msgs:
#     msg.pretty_print()
#     print('-' * 100)
# structured_model = model.with_structured_output(Data)
# chain = prompt_template | structured_model
# res = chain.invoke({
#     "examples": example_msgs,
#     "input": "篮球场上，⾝⾼两⽶的中锋王伟默契地将球传给⼀⽶七的后卫挚友李明，完成⼀记绝杀。"
# })
# print(type(res))
# print(res)
# messages = [
#     SystemMessage(content="你是⼀个提取信息的专家，只从⽂本中提取相关信息。如果您不知道要提取的属性的值，属性值返回null"),
#     HumanMessage(content="海洋是⼴阔⽽蓝⾊的。他有两万多英尺深。")
# ]
# result = structured_model.invoke(messages)
# print(result)


# prompt_template = PromptTemplate(
#     input_variables=["input", "output"],
#     template="input:{input}\noutput:{output}"
# )
# example_messages = [
#     {"input": "thin", "output": "fat"},
#     {"input": "small", "output": "big"},
#     {"input": "objective", "output": "subjective"},
#     {"input": "kind", "output": "unkind"},
#     {"input": "good", "output": "bad"},
#     {"input": "shy", "output": "bold"},
#     {"input": "smart", "output": "dumb"},
#     {"input": "strong", "output": "weak"}
# ]
# example_selector = LengthBasedExampleSelector(
#     examples=example_messages,
#     example_prompt=prompt_template,
#     max_length=52,
#     get_text_length=len
# )
# example_selector = SemanticSimilarityExampleSelector(
#     examples=example_messages,
#     example_prompt=prompt_template,
# )
# messages = example_selector.select_examples(
#     {}
# )
# print(messages)
# few_shot_prompt = FewShotPromptTemplate(
#     example_selector=example_selector,
#     # examples=example_messages,
#     example_prompt=prompt_template,
#     suffix="input:{input}",
#     input_variables=["input"]
# )


# Plain-text example template (consumed by the length-based selector).
prompt_template = PromptTemplate(
    input_variables=["input", "output"],
    template="input:{input}\noutput:{output}"
)
# Chat-style example template: each example renders as a human/AI pair.
chat_prompt_template = ChatPromptTemplate(
    [
        ("system", "你是一个寻找反义词的专家"),
        # MessagesPlaceholder("examples"),
        ("human", "{input}"),
        ("ai", "{output}"),
    ]
)
# Antonym pairs serving as few-shot examples.
_antonym_pairs = [
    ("thin", "fat"),
    ("small", "big"),
    ("objective", "subjective"),
    ("kind", "unkind"),
    ("good", "bad"),
    ("shy", "bold"),
    ("smart", "dumb"),
    ("strong", "weak"),
    ("happy", "sad"),
]
examples = [{"input": word, "output": antonym} for word, antonym in _antonym_pairs]
# Selector that keeps only as many examples as fit within max_length
# (currently not wired into length_prompt — see commented line below).
example_selector = LengthBasedExampleSelector(
    examples=examples,
    example_prompt=prompt_template,
    max_length=500,
    get_text_length=len
)
# Few-shot chat prompt that inlines every example verbatim.
length_prompt = FewShotChatMessagePromptTemplate(
    examples=examples,
    # example_selector=example_selector,
    example_prompt=chat_prompt_template,
)
print(length_prompt.invoke({"input": "relax", "output": "active"}).to_string())

# Flatten each antonym example into a human-question / AI-answer pair.
example_messages = [
    message
    for example in examples
    for message in (
        HumanMessage(content=example["input"]),
        AIMessage(content=example["output"]),
    )
]
# print(chat_prompt_template.invoke({"input": "thin", "examples": example_messages}).to_messages())
# print(chat_prompt_template.invoke({"input": "thin", "examples": example_messages}).to_string())


