# Classification: Classify text into categories or labels using chat models with structured outputs.


from dotenv import load_dotenv

# Pull environment variables (e.g. DEEPSEEK_API_KEY) from a local .env file.
load_dotenv()

import getpass
import os

# Fall back to an interactive prompt only when the key is not already set.
# NOTE: the original prompt text said "Google Gemini", but the key being
# configured (and the model used below) is DeepSeek — fixed here.
if not os.environ.get("DEEPSEEK_API_KEY"):
    os.environ["DEEPSEEK_API_KEY"] = getpass.getpass("Enter API key for DeepSeek: ")

from langchain.chat_models import init_chat_model

# "deepseek-chat" lets init_chat_model infer the DeepSeek provider and
# read DEEPSEEK_API_KEY from the environment.
llm = init_chat_model("deepseek-chat")

from typing import Literal

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field

# Prompt that instructs the model to extract only the properties declared on
# the 'Classification' schema from the passage injected via {input}.
tagging_prompt = ChatPromptTemplate.from_template(
    """
Extract the desired information from the following passage.

Only extract the properties mentioned in the 'Classification' function.

Passage:
{input}
"""
)


class Classification(BaseModel):
    """Schema the model must fill in when classifying a passage.

    Each field is constrained to a closed set of values with ``Literal``.
    Unlike the ``enum=`` keyword previously passed to ``Field`` (which is not
    a recognized Field parameter and is only forwarded as JSON-schema extra,
    without any validation), ``Literal`` both advertises the allowed values
    in the schema sent to the model and is enforced by Pydantic on parse.
    """

    # Overall emotional tone of the passage.
    sentiment: Literal["happy", "neutral", "sad"]
    # 1 = least aggressive, 5 = most aggressive.
    aggressiveness: Literal[1, 2, 3, 4, 5] = Field(
        ...,
        description="describes how aggressive the statement is, the higher the number the more aggressive",
    )
    # Language the passage is written in.
    language: Literal["spanish", "english", "french", "german", "italian"]


# Structured LLM: bind the Classification schema so model responses are
# returned as parsed Classification instances instead of raw text.
structured_llm = llm.with_structured_output(Classification)


# Run the classifier over a few sample passages (Spanish positive,
# Spanish aggressive, English neutral) and print each structured result.
sample_passages = [
    "Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!",
    "Estoy muy enojado con vos! Te voy a dar tu merecido!",
    "Weather is ok here, I can go outside without much more than a coat",
]
for passage in sample_passages:
    formatted = tagging_prompt.invoke({"input": passage})
    print(structured_llm.invoke(formatted))