from langchain.chat_models import init_chat_model
import os
from pydantic import BaseModel, Field
from typing import Optional, Union
from typing_extensions import Annotated, TypedDict
# Read the API key for the OpenAI-compatible endpoint from the environment.
# NOTE: os.getenv returns None when the variable is unset, and str(None)
# would silently become the literal string "None" and be sent as a bogus
# API key — fall back to an empty string instead.
key = os.getenv("OPENAI_API_KEY")
api_key = key if key is not None else ""

# Chat model configuration for an OpenAI-compatible relay endpoint.
# NOTE(review): base_url points at api.zetatechs.com, a third-party gateway —
# confirm this is the intended proxy for gpt-4o-mini traffic.
_llm_config = {
    "model": "gpt-4o-mini",
    "model_provider": "openai",
    "base_url": "https://api.zetatechs.com/v1",
    "api_key": api_key,
}
llm = init_chat_model(**_llm_config)


# Using pydantic  choosing multi schemas
# Structured-output schema: a two-part joke plus an optional self-rating.
# The docstring and Field descriptions are sent to the model as part of the
# schema, so they are kept verbatim.
class Joke(BaseModel):
    """Joke to tell user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline of the joke")
    rating: Optional[int] = Field(
        default=None,
        description="How funny the joke is, from 1 to 10",
    )
#
# class ConversationalResponse(BaseModel):
#     """Respond in a conversational manner. Be kind and helpful"""
#     response: str = Field(description="A conversational response to the user's query")
#
# class FinalResponse(BaseModel):
#     response: Union[Joke, ConversationalResponse]
#
# structured_llm = llm.with_structured_output(FinalResponse)
# print(structured_llm.invoke("Tell me a joke about cats"))
# print(structured_llm.invoke("How are u?"))


# Using TypedDict
# class Joke(TypedDict):
#     """Joke to tell user."""
#     setup: Annotated[str, ..., "The setup of the joke"]
#     punchline: Annotated[str, ..., "The punchline of the joke"]
#     rating: Annotated[Optional[int], None, "How funny the joke is, from 1 to 10"]
#
# class ConversationalResponse(TypedDict):
#     """Respond in a conversational manner. Be kind and helpful."""
#     response: Annotated[str, ..., "A conversational response to the user's query"]
#
# class FinalResponse(TypedDict):
#     final_output: Union[Joke, ConversationalResponse]
#
# structured_llm = llm.with_structured_output(FinalResponse)
# print(structured_llm.invoke("Tell me a joke about cats"))
# print(structured_llm.invoke("How are you today?"))

# Streaming
# class Joke(TypedDict):
#     """Joke to tell user."""
#     setup: Annotated[str, ..., "The setup of the joke"]
#     punchline: Annotated[str, ..., "The punchline of the joke"]
#     rating: Annotated[Optional[int], None, "How funny the joke is, from 1 to 10"]
#
#
# structured_llm = llm.with_structured_output(Joke)
# for chunk in structured_llm.stream("Tell me a joke about cats"):
#     print(chunk)


# Few-shot prompting
# from langchain_core.prompts import ChatPromptTemplate
#
# system = """You are a hilarious comedian. Your specialty is knock-knock jokes. \
# Return a joke which has the setup (the response to "Who's there?") and the final punchline (the response to "<setup> who?").
#
# Here are some examples of jokes:
#
# example_user: Tell me a joke about planes
# example_assistant: {{"setup": "Why don't planes ever get tired?", "punchline": "Because they have rest wings!", "rating": 2}}
#
# example_user: Tell me another joke about planes
# example_assistant: {{"setup": "Cargo", "punchline": "Cargo 'vroom vroom', but planes go 'zoom zoom'!", "rating": 10}}
#
# example_user: Now about caterpillars
# example_assistant: {{"setup": "Caterpillar", "punchline": "Caterpillar really slow, but watch me turn into a butterfly and steal the show!", "rating": 5}}"""
#
# prompt = ChatPromptTemplate.from_messages([("system", system), ("human", "{input}")])
#
# few_shot_structured_llm = prompt | llm
# print(llm.invoke("what's something funny about woodpeckers"))

# Advanced
# structured_llm = llm.with_structured_output(None, method="json_mode")
# # NOTE: not all models support the json_mode method
# print(structured_llm.invoke(
#     "Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys"
# ))
#
# structured_llm = llm.with_structured_output(Joke, include_raw=True)
# print(structured_llm.invoke("Tell me a joke about cats"))