import os
from dotenv import load_dotenv
from langchain_community.vectorstores import FAISS
from langchain_core.example_selectors import MaxMarginalRelevanceExampleSelector
from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate
from langchain_openai import ChatOpenAI
from langchain_openai import OpenAIEmbeddings


# Load variables from a local .env file (verbose=True logs what was loaded).
load_dotenv(verbose=True)

# OpenAI credentials/endpoint; None if the variables are absent.
api_key = os.environ.get("OPENAI_API_KEY")
api_base = os.environ.get("OPENAI_API_BASE")

# Antonym pairs used as few-shot demonstrations (English and Chinese).
_ANTONYM_PAIRS = [
    ("happy", "sad"),
    ("tall", "short"),
    ("big", "small"),
    ("fast", "slow"),
    ("高兴", "悲伤"),
]
examples = [{"input": word, "output": antonym} for word, antonym in _ANTONYM_PAIRS]

# Rendering for one few-shot example: "原词：<input>\n反义：<output>".
_EXAMPLE_TEMPLATE = "原词：{input}\n反义：{output}"

example_prompt = PromptTemplate(
    template=_EXAMPLE_TEMPLATE,
    input_variables=["input", "output"],
)

# Embedding client used to vectorize the examples for similarity search.
# Fix: the original mixed the legacy `openai_api_base` alias with the modern
# `api_key` alias; use the legacy alias pair consistently, matching the
# ChatOpenAI construction elsewhere in this file.
embeddings = OpenAIEmbeddings(
    openai_api_base=api_base,
    openai_api_key=api_key,
)

# Pick the 2 examples most relevant to the query while staying diverse
# (Max Marginal Relevance), backed by a FAISS store of embedded examples.
example_selector = MaxMarginalRelevanceExampleSelector.from_examples(
    examples=examples,
    embeddings=embeddings,
    vectorstore_cls=FAISS,
    k=2,
)

# Few-shot prompt assembled at format time: the MMR-selected examples are
# rendered with `example_prompt` and placed between the prefix instruction
# and the suffix carrying the user's word.
dynamic_prompt = FewShotPromptTemplate(
    input_variables=["input"],
    prefix="给出输入词的反义词",
    suffix="原词：{input}\n反义：",
    example_selector=example_selector,
    example_prompt=example_prompt,
)

# Chat model client; temperature=0 for deterministic antonym output.
llm = ChatOpenAI(
    model="gpt-4o-mini",
    openai_api_base=api_base,
    openai_api_key=api_key,
    temperature=0,
)

# Runnable pipeline: format the dynamic prompt, then send it to the model.
chain = dynamic_prompt | llm

# To actually query the model, uncomment:
# print(chain.invoke({"input": "难过"}))

# For now, just render the dynamically-assembled prompt for inspection.
print(dynamic_prompt.format(input="难过"))
