from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from pydantic import BaseModel, Field, validator
from typing import List, Dict, Optional
from enum import Enum
import json
from dotenv import load_dotenv, find_dotenv
# Load environment variables (e.g. the OpenAI-compatible API key) from a .env file
_ = load_dotenv(find_dotenv())


# Output structure: the schema the semantic parser is expected to fill in
class SortEnum(str, Enum):
    """Sort key for plan listings: by data volume or by price."""

    data = "data"
    price = "price"


class OrderingEnum(str, Enum):
    """Sort direction: ascending or descending."""

    ascend = "ascend"
    descend = "descend"


class Semantics(BaseModel):
    """Structured parse of a user query about mobile data plans.

    Every field is optional: a ``None`` value means the user did not
    constrain that dimension. The Chinese ``description`` strings are
    part of the runtime schema shown to the LLM and must not change.
    """

    # Specific plan name, if the user mentioned one.
    name: Optional[str] = Field(default=None, description="流量包名称")
    # Inclusive price bounds.
    price_lower: Optional[int] = Field(default=None, description="价格下限")
    price_upper: Optional[int] = Field(default=None, description="价格上限")
    # Inclusive data-volume bounds.
    data_lower: Optional[int] = Field(default=None, description="流量下限")
    data_upper: Optional[int] = Field(default=None, description="流量上限")
    # Requested ordering: which key, and in which direction.
    sort_by: Optional[SortEnum] = Field(default=None, description="按价格或流量排序")
    ordering: Optional[OrderingEnum] = Field(default=None, description="升序或降序排列")


# Prompt template for the semantic-parsing demo below.
# NOTE(review): this binding is only referenced by the commented-out
# structured-output chain and is shadowed by the reassignment of
# `prompt` further down — it has no effect on the live pipeline.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "你是一个语义解析器。你的任务是将用户的输入解析成JSON表示。不要回答用户的问题。"),
        ("human", "{text}"),
    ]
)

# Model: DeepSeek-V3 served through an OpenAI-compatible endpoint;
# temperature 0 for deterministic, parse-like output.
llm = ChatOpenAI(model="deepseek-ai/DeepSeek-V3", temperature=0)
#
# structured_llm = llm.with_structured_output(Semantics)
#
# # LCEL expression
# runnable = (
#     {"text": RunnablePassthrough()} | prompt | structured_llm
# )
#
# # Run directly
# ret = runnable.invoke("不超过100元的流量大的套餐有哪些")
# NOTE(review): `.dict()` is deprecated in Pydantic v2 — prefer
# `.model_dump()` if this example is re-enabled.
# print(
#     json.dumps(
#         ret.dict(),
#         indent = 4,
#         ensure_ascii=False
#     )
# )

# Single-variable prompt: the raw input string becomes {topic}.
prompt = PromptTemplate.from_template("讲个关于{topic}的笑话")

# LCEL chains components with the pipe operator: each stage's output
# feeds the next stage's input, like a functional pipeline. The dict
# head maps the bare input string onto the "topic" prompt variable,
# and StrOutputParser reduces the model message to plain text.
runnable = {"topic": RunnablePassthrough()} | prompt | llm | StrOutputParser()

# Stream the reply chunk-by-chunk instead of waiting for the full text.
for chunk in runnable.stream("小明"):
    print(chunk, end="", flush=True)

