# %% import
import os

# Credentials are loaded from a .env file via python-dotenv instead of being
# hard-coded here; the commented lines show which variables are expected.
# os.environ['OPENAI_API_KEY'] = "sk-"
# os.environ['OPENAI_API_BASE'] = "https:"
from dotenv import load_dotenv

# Reads .env from the working directory and populates os.environ
# (existing environment variables are NOT overwritten by default).
load_dotenv()

from langchain_openai import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# %% single LLMChain: prompt template -> OpenAI completion
llm = OpenAI()
prompt = PromptTemplate(
    input_variables=['place'],
    template="Best places to visit in {place}?"
)

chain = LLMChain(llm=llm, prompt=prompt)

# `chain.run(...)` was deprecated in LangChain 0.1.0; `invoke` is the
# replacement. `invoke` returns a dict of {input_key: ..., output_key: ...};
# LLMChain's default output key is "text", so we read that entry.
print(chain.invoke({'place': 'India'}).get("text"))

# %% simple sequential chains
from langchain.chains import SimpleSequentialChain

# Chain 1: given a place, suggest 5 places to visit there.
template = """You have to suggest 5 best places to visit in {place}?

YOUR RESPONSE:
"""
prompt_template = PromptTemplate(
    input_variables=['place'],
    template=template
)

place_chain = LLMChain(llm=llm, prompt=prompt_template)

# Chain 2: given that list of places, estimate expenses and days needed.
# (Fixed typo: "a list a places" -> "a list of places".)
template = """Given a list of places, please estimate the expenses to visit all of them in local currency and also the days needed
{expenses}

YOUR RESPONSE:
"""
prompt_template = PromptTemplate(
    input_variables=['expenses'],
    template=template
)

# Reuse the `llm` instance created above — re-instantiating OpenAI() here was
# redundant (same default configuration).
expense_chain = LLMChain(llm=llm, prompt=prompt_template)

# place_chain's output feeds expense_chain's input.
# verbose=True prints the intermediate steps of each sub-chain.
final_chain = SimpleSequentialChain(chains=[place_chain, expense_chain], verbose=True)

# SimpleSequentialChain's input key is "input" and its output key is "output"
# (NOT "text", which is LLMChain's key — .get("text") returned None here).
review = final_chain.invoke({'input': 'India'}).get("output")
