import os
from typing import List, Optional

from langchain.globals import set_verbose
from langchain_community.chat_models import ChatTongyi
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.utils.function_calling import tool_example_to_messages
from pydantic import BaseModel, Field, SecretStr

set_verbose(True)  # Enable verbose logging globally for all LangChain components.

class Person(BaseModel):
    """Information about a person."""

    # ^ Doc-string for the entity Person.
    # This doc-string is sent to the LLM as the description of the schema Person,
    # and it can help to improve extraction results.
    # (It is runtime behavior, so it is kept byte-identical — same for the
    # Chinese `description` strings below, which are also sent to the LLM.)

    # Note that:
    # 1. Every field is Optional -- this allows the model to decline to extract it.
    # 2. Every field has a `description` -- used by the LLM; good descriptions
    #    improve extraction quality.
    # FIX: `name` was annotated as plain `str` while carrying `default=None`;
    # that contradicts the annotation (None is not a str) and breaks the
    # "every field is optional" design above. It is now Optional[str] like the
    # other three fields.
    name: Optional[str] = Field(default=None, description="人的名称")
    hair_color: Optional[str] = Field(
        default=None, description="头发颜色"
    )
    height_in_meters: Optional[str] = Field(
        default=None, description="身高,单位为米"
    )
    weight_in_kg: Optional[str] = Field(
        default=None, description="体重,单位为千克"
    )


class Data(BaseModel):
    """Extracted data about people."""

    # NOTE: the docstring above and the `description` string below are sent to
    # the LLM as part of the schema, so they are runtime behavior — do not edit
    # them casually.
    # Wrapping Person in a list-valued container model lets the LLM return
    # every person mentioned in a text in a single structured call.
    people: List[Person] = Field(description="从文本中提取的所有人物")

# NOTE(security): the original code hard-coded a live API key in source code.
# The key is now read from the DASHSCOPE_API_KEY environment variable; the old
# literal key must be considered leaked and should be rotated.
chatLLM = ChatTongyi(
    model="qwen3-235b-a22b",
    # Swap the model name as needed. Model list:
    # https://help.aliyun.com/zh/model-studio/getting-started/models
    streaming=True,
    api_key=SecretStr(os.environ["DASHSCOPE_API_KEY"]),  # raises KeyError early if unset
    # other params...
)

# Bind the Data schema so the model's reply is parsed into a Data instance
# (structured output) instead of free-form text.
structured_llm = chatLLM.with_structured_output(schema=Data)




# Define a custom prompt to provide instructions and any additional context.
# 1) You can add examples into the prompt template to improve extraction quality
# 2) Introduce additional parameters to take context into account (e.g., include metadata
#    about the document from which the text was extracted.)
# The Chinese system message instructs the model, roughly: "You are an expert
# at extraction algorithms. Extract only relevant information from the text;
# if the text contains several entities, return all of them. If you do not
# know the value of a requested attribute, return null for it."
# NOTE: the prompt text itself is runtime behavior — kept byte-identical.
prompt_template = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "你是提取算法的专家。"
            "仅从文本中提取相关信息。如果文本中包含多个特征信息,则返回多个"
            "如果您不知道要求提取的属性的值，"
            "属性值返回null。",
        ),
        # Few-shot reference examples could be injected here via
        # MessagesPlaceholder('examples'),
        # "{text}" is filled at invoke time with the document to extract from.
        ("human", "{text}"),
    ]
)
# Step 2: Build the prompt (alternative schema-embedding variant kept below for reference)

# prompt_template = ChatPromptTemplate.from_messages([
#     ("system", """你是一个信息提取助手，请从以下文本中提取所有人物信息：
#
#     输出必须符合如下 JSON Schema:
#     {schema}
#
#     如果某个字段没有明确提及，请留空字符串或 null，但不能省略字段。
#     请确保提取所有提到的人物。
#     """),
#     ("human", "{text}")
# ]).partial(schema=Data.model_json_schema())
#
# chain = prompt_template | structured_llm
#
# # Step 4: 输入文本
# text = "邢冬阳身高173，体重70，头发黑色。张通比我高10厘米，比我重10公斤，头发的颜色和我一样。小明的头发和张通一样。"
#
# result = chain.invoke({"text": text})
# print(result)

# Demo run: render the chat prompt for a sample Chinese text, show it, then
# invoke the structured-output model and print the parsed Data result.
print("--------------------")
sample_text = "邢冬阳身高173，体重70，头发黑色。张通比我高10厘米，比我重10公斤，头发的颜色和我一样。小明的头发和张通一样。"
rendered_prompt = prompt_template.invoke({"text": sample_text})
print("prompt: " + str(rendered_prompt))

extracted = structured_llm.invoke(rendered_prompt)
print(extracted)



#
# examples = [
#     (
#         "大海浩瀚而湛蓝。它有20000多英尺深。",
#         Data(people=[]),
#     ),
#     (
#         "冬阳去海南旅游了",
#         Data(people=[Person(name="冬阳", height_in_meters=None, hair_color=None)]),
#     ),
# ]
#
# messages = []
#
# for txt, tool_call in examples:
#     if tool_call.people:
#         # This final message is optional for some providers
#         ai_response = "检测到人"
#     else:
#         ai_response = "未检测到人"
#     messages.extend(tool_example_to_messages(txt, [tool_call], ai_response=ai_response))
# for message in messages:
#     message.pretty_print()