# from langchain_community.llms import Tongyi
from langchain_community.llms.tongyi import Tongyi
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.prompts import PromptTemplate
from typing_extensions import Annotated,TypedDict
from typing import Optional

# from langchain_openai.chat_models.base import BaseChatOpenAI
# from langchain.llms import OpenAI
# from getpass import getpass
# DASHSCOPE_API_KEY = getpass()
import os


# TypedDict
class Joke(TypedDict):
    """Structured schema for a joke produced by the LLM.

    Passed to ``model.with_structured_output`` so the model's reply is
    parsed into a dict with exactly these keys.
    """

    # Each Annotated triple is (type, default, description). A ``...``
    # default marks the field as required; ``None`` makes it optional.
    setup: Annotated[str, ..., "The setup of the joke"]
    punchline: Annotated[str, ..., "The punchline of the joke"]
    rating: Annotated[Optional[int], None, "How funny the joke is, from 1 to 10"]




# os.environ["DASHSCOPE_API_KEY"] = DASHSCOPE_API_KEY
# Configure the Tongyi (DashScope) "qwen-plus" model.
# SECURITY: the API key must come from the environment, never from a
# literal in source control — a committed key is leaked to anyone with
# repo access and should be rotated. (The commented-out line above this
# block shows DASHSCOPE_API_KEY was already the intended source.)
model = Tongyi(
    model="qwen-plus",
    api_key=os.environ.get("DASHSCOPE_API_KEY"),
)

# # res = llm.invoke("人生的意义是啥？")
# for chunk in llm.stream('人生的意义是啥'):
#     print(chunk, end="")
# messages = [
#     SystemMessage("Translate the following from English into Italian"),
#     HumanMessage("hi!"),
# ]

# # r = model.invoke(messages)
# # print(r)
# # print(messages)


# for token in model.stream(messages):
#     print(token, end="|")
    
    
# Bind the Joke schema to the model so replies are parsed into a dict
# with Joke's fields, then request one joke and print the parsed result.
structured_llm = model.with_structured_output(Joke)
print(res := structured_llm.invoke(" Tell me a joke."))


    
    

    
    