# utility chains overview
# %% load_summarization_chain
import os

# os.environ['OPENAI_API_KEY'] = "sk-"
# os.environ['OPENAI_API_BASE'] = ""
from dotenv import load_dotenv

load_dotenv()

from langchain_openai.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.docstore.document import Document

# %%
# Summarization cell: read a local text file and produce a map-reduce summary.
model = OpenAI(temperature=0.9)  # higher temperature for more varied phrasing

# Load the raw source document from disk.
with open("Sample.txt", encoding='utf-8') as source_file:
    raw_text = source_file.read()

# Chunk the text so each piece fits the model's context window.
splitter = CharacterTextSplitter()
chunks = splitter.split_text(raw_text)

# The summarize chain consumes Document objects, one per chunk.
documents = [Document(page_content=chunk) for chunk in chunks]
print(documents)

# Map-reduce: summarize each chunk, then combine the partial summaries.
summarize_chain = load_summarize_chain(model, chain_type='map_reduce', verbose=True)
# Print the final combined summary text.
print(summarize_chain.invoke(documents).get('output_text'))

# %% http request
## LLMRequestsChain: fetch a web page and have the LLM extract an answer from it.
from langchain.chains import LLMRequestsChain, LLMChain

# Prompt template: the downloaded page text is injected as {requests_result}.
template = """
Extract the answer to the question '{query}' or say "not found" if the information is not available.
{requests_result}
"""

PROMPT = PromptTemplate(
    input_variables=["query", "requests_result"],
    template=template,
)

llm = OpenAI()
chain = LLMRequestsChain(llm_chain=LLMChain(llm=llm, prompt=PROMPT))

question = "What is the capital of india?"
inputs = {
    'query': question,
    # Bing search URL for the question; the chain fetches this page itself.
    'url': "https://cn.bing.com/search?q=" + question.replace(" ", "+"),
}
# Use .invoke() rather than the deprecated Chain.__call__ form `chain(inputs)`,
# matching how the summarization cell earlier in this file runs its chain.
print(chain.invoke(inputs))

import inspect

# Show the chain's internal _call implementation for reference.
print(inspect.getsource(chain._call))
