from langchain.chains import LLMChain
from langchain.prompts.example_selector import (
    MaxMarginalRelevanceExampleSelector,
    SemanticSimilarityExampleSelector,
)
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import DashScopeEmbeddings
from langchain.prompts import FewShotPromptTemplate, PromptTemplate

import os

from langchain_community.chat_models import ChatTongyi

# SECURITY: this API key is committed in source — it should be considered
# leaked, rotated, and supplied via the environment or a secrets manager
# instead of being hard-coded here.
# setdefault() keeps the script runnable out of the box but no longer
# clobbers a DASHSCOPE_API_KEY the user has already exported.
os.environ.setdefault("DASHSCOPE_API_KEY", "sk-9d8f1914800e497f8717144e860f99bc")

# Tongyi (Qwen) chat model; the client reads DASHSCOPE_API_KEY from the
# environment at construction time.
llm = ChatTongyi()

# Template used to render one few-shot example as an "Input/Output" pair.
_EXAMPLE_TEMPLATE = "Input: {input}\nOutput: {output}"

example_prompt = PromptTemplate(
    template=_EXAMPLE_TEMPLATE,
    input_variables=["input", "output"],
)

# Examples for a toy antonym task: each word is paired with its opposite.
_ANTONYM_PAIRS = [
    ("happy", "sad"),
    ("tall", "short"),
    ("energetic", "lethargic"),
    ("sunny", "gloomy"),
    ("windy", "calm"),
]
examples = [{"input": word, "output": antonym} for word, antonym in _ANTONYM_PAIRS]

# Embed every example with DashScope and keep the vectors in an in-memory
# Chroma store; at prompt-format time the selector returns the k=2 examples
# most semantically similar to the query.
embeddings = DashScopeEmbeddings()
example_selector = SemanticSimilarityExampleSelector.from_examples(
    examples=examples,
    embeddings=embeddings,
    vectorstore_cls=Chroma,
    k=2,
)

# Instruction shown before the selected examples (Chinese: "give the antonym
# of each input; you must pick the answer from the provided examples only —
# do not answer on your own").
_PREFIX = "给出每个输入的反义词,必须要从已给的样例中选取结果，请不要自主回答"
# The actual query slot, rendered after the selected examples.
_SUFFIX = "Input: {adjective}\nOutput:"

# Few-shot prompt: prefix instruction, then the k selected examples rendered
# via example_prompt, then the query suffix.
final_prompt = FewShotPromptTemplate(
    example_selector=example_selector,
    example_prompt=example_prompt,
    prefix=_PREFIX,
    suffix=_SUFFIX,
    input_variables=["adjective"],
)

# Show the fully rendered prompt (with the dynamically selected examples)
# for inspection before calling the model.
print(final_prompt.format(adjective="happy"))

# NOTE(review): LLMChain is deprecated in modern LangChain in favour of the
# LCEL composition `final_prompt | llm`; kept as-is to match this file's
# imports.
chain = LLMChain(llm=llm, prompt=final_prompt)

# Pass the input as a dict explicitly keyed by the prompt's input variable
# ("adjective") rather than relying on LLMChain's implicit coercion of a
# bare string into the chain's single input key.
res = chain.invoke({"adjective": "glad"})
print(f"the result is {res}")
