"""
接 Episode 6
"""
from typing import Literal
from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field
import os
from langchain.chat_models import init_chat_model

"""
这是classification 的一个小样例
"""
#  从环境变量中读取DeepSeek的API Key
key = os.getenv("OPENAI_API_KEY")
# print(key)
api_key = str(key)

llm = init_chat_model(
    model="gpt-4o-mini",
    base_url="https://api.zetatechs.com/v1",
    api_key=api_key
)


class Classification(BaseModel):
    """Schema for structured tagging of a text passage.

    The LLM fills these fields via structured output
    (``llm.with_structured_output(Classification)``); ``Literal`` types
    constrain the model to the enumerated values.
    """

    # Overall emotional tone of the passage.
    sentiment: Literal["happy", "neutral", "sad"] = Field(
        description="The overall sentiment of the passage",
    )
    # 1 (calm) .. 5 (most aggressive).
    aggressiveness: Literal[1, 2, 3, 4, 5] = Field(
        description="describes how aggressive the statement is, the higher the number the more aggressive",
    )
    # Language the passage is written in.
    language: Literal["spanish", "english", "french", "german", "italian"] = Field(
        description="The language the passage is written in",
    )


# Prompt that asks the model to extract only the properties declared on the
# 'Classification' schema from the passage substituted into {input}.
_TAGGING_TEMPLATE = """
Extract the desired information from the following passage.

Only extract the properties mentioned in the 'Classification' function.

Passage:
{input}
"""

tagging_prompt = ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)

# Bind the Classification schema to the model so each invocation returns a
# populated Classification instance rather than free-form text.
# NOTE(review): 'response' is really a structured-output runnable, not a
# response object; the name is kept for backward compatibility.
response = llm.with_structured_output(Classification)

# Sample passages (Spanish positive, Spanish aggressive, English neutral).
# Deduplicated from three copy-pasted invoke/print sequences into one loop;
# the final values of `inp` and `prompt` match the original script's.
_SAMPLES = [
    "Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!",
    "Estoy muy enojado con vos! Te voy a dar tu merecido!",
    "Weather is ok here, I can go outside without much more than a coat",
]

for inp in _SAMPLES:
    prompt = tagging_prompt.invoke({"input": inp})
    print(response.invoke(prompt))
