from langchain import OpenAI
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationBufferMemory, ConversationSummaryMemory
from langchain.callbacks import get_openai_callback
import os


def track_tokens_usage(chain, query):
    """Run *query* through *chain* and report OpenAI token usage.

    Wraps the call in langchain's OpenAI callback context, prints the
    total tokens consumed and the number of successful API requests,
    then returns the chain's text output unchanged.
    """
    with get_openai_callback() as cb:
        answer = chain.run(query)
    # The callback object keeps its counters after the context exits,
    # so reporting can happen outside the `with` block.
    print(f'Total tokens: {cb.total_tokens}')
    print(f'Requests: {cb.successful_requests}')
    return answer


# Manually read the .env file and export each KEY=VALUE pair as an
# environment variable (lightweight stand-in for python-dotenv).
with open('.env', 'r', encoding='utf-8') as f:
    for line in f:
        line = line.strip()
        # Skip blank lines and comment lines so a typical .env parses.
        if not line or line.startswith('#'):
            continue
        # Split only on the FIRST '=' so values may themselves contain
        # '=' characters (e.g. base64-encoded secrets); the original
        # line.split('=') raised ValueError on such lines.
        key, _, value = line.partition('=')
        os.environ[key.strip()] = value.strip()

# Completion-style LLM wrapper using the API key loaded from .env above.
# NOTE(review): "text-davinci-003" has been retired by OpenAI — confirm
# and migrate to a current model if these calls start failing.
llm = OpenAI(
    temperature=0,  # deterministic sampling
    openai_api_key=os.environ["OPENAI_API_KEY"],
    model_name="text-davinci-003"
)

# Two standalone calls with no memory attached: each request is
# independent, so the second question has no context to answer from.
llm("What is Langchain?")
llm("What did I just ask you?")

# Conversation chain with buffer memory: the full transcript is replayed
# into the prompt every turn, so token usage grows with each exchange.
conversation = ConversationChain(llm=llm, memory=ConversationBufferMemory())
print(conversation.prompt.template)
track_tokens_usage(conversation, "My interest is to explore the options of scaling Ethereum")
track_tokens_usage(conversation, "Could you please elaborate more on sharding? Try to use at least 1000 words.")
track_tokens_usage(conversation, "What are the cons of sharding?")
track_tokens_usage(conversation, "How about state channels?")
track_tokens_usage(conversation, "Show me some popular state channel products")
# Dump the accumulated transcript held in the buffer memory.
print(conversation.memory.buffer)
