from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough  # 可传递的Runnable
from pydantic import BaseModel, Field
from typing import List, Dict, Optional
from enum import Enum
import json

# Closed set of fields a plan query may be sorted by.
class SortEnum(str, Enum):
    """Sortable fields for a data-plan query."""

    data = "data"    # sort by data allowance
    price = "price"  # sort by price
    
class OrderingEnum(str, Enum):
    """Sort direction for query results."""

    ascend = "ascend"    # smallest value first
    descend = "descend"  # largest value first

class Semantics(BaseModel):
    """Structured representation of a user's data-plan query.

    Every field is optional: anything the user did not mention stays None.
    NOTE: the ``description`` strings are emitted into the JSON schema the
    LLM sees for structured output, so they are kept verbatim.
    """

    name: Optional[str] = Field(default=None, description="流量包名称")
    price_lower: Optional[int] = Field(default=None, description="价格下限")
    price_upper: Optional[int] = Field(default=None, description="价格上限")
    data_lower: Optional[int] = Field(default=None, description="流量下限")
    data_upper: Optional[int] = Field(default=None, description="流量上限")
    sort_by: Optional[SortEnum] = Field(default=None, description="按价格或流量排序")
    ordering: Optional[OrderingEnum] = Field(default=None, description="升序或降序排列")
    
# Prompt template: a fixed system instruction (telling the model to parse,
# not answer) followed by the raw user input in the {input} slot.
_system_message = (
    "system",
    "你是一个语义解析器。你的任务是将用户的输入解析成JSON表示。不要回答用户的问题。",
)
prompt = ChatPromptTemplate.from_messages([_system_message, ("human", "{input}")])

# Model: GPT-4o at temperature 0 for deterministic parsing, constrained to
# return a Semantics instance via structured output.
llm = ChatOpenAI(temperature=0, model="gpt-4o")
structured_llm = llm.with_structured_output(Semantics)

# LCEL chain: RunnablePassthrough forwards the raw query string into the
# prompt's {input} variable, then the structured model parses the messages.
runnable = (
    {"input": RunnablePassthrough()}
    | prompt
    | structured_llm
)

# Run sample queries through the chain and pretty-print the parsed semantics.
# The invoke + dump + print logic was previously duplicated per query; a
# single loop keeps the two call sites consistent.
_QUERIES = [
    "我需要一个价格在100-200之间，流量在1000-2000之间的流量包, 有吗？",
    "不超过100元的流量大的套餐有哪些, 优先推荐流量大的",
]

for _query in _QUERIES:
    result = runnable.invoke(_query)  # a Semantics instance
    print(
        json.dumps(
            result.model_dump(),  # convert the Semantics object to a plain dict
            indent=4,
            ensure_ascii=False,  # keep Chinese characters readable in output
        )
    )
