from langchain import OpenAI
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationBufferMemory, ConversationSummaryMemory
from langchain.callbacks import get_openai_callback
import os


def track_tokens_usage(chain, query):
    """Run *query* through *chain* and report OpenAI token consumption.

    The ``get_openai_callback`` context manager records token usage for
    every LLM call made while the chain executes. The chain's answer is
    returned unchanged; the total token count is printed as a side effect.
    """
    with get_openai_callback() as cb:
        response = chain.run(query)
        print(f'Total tokens: {cb.total_tokens}')

    return response


# Build a deterministic (temperature=0) completion-model LLM.
# NOTE(review): "text-davinci-003" is a legacy OpenAI completion model —
# confirm it is still available on the account before running.
llm = OpenAI(
    temperature=0,
    openai_api_key=os.environ["OPENAI_API_KEY"],
    model_name="text-davinci-003"
)

# Conversation chain whose memory is a running LLM-generated summary of the
# dialogue (ConversationSummaryMemory) rather than a verbatim transcript —
# this trades extra summarization calls for a shorter prompt over time.
conversation = ConversationChain(llm=llm, memory=ConversationSummaryMemory(llm=llm))

# Show the prompt template the summary memory uses to condense the history.
print(conversation.memory.prompt.template)

# Run a multi-turn dialogue; each call prints the total tokens it consumed,
# so the growth (or compression) of the summarized context can be observed
# across turns.
track_tokens_usage(conversation, "My interest is to explore the options of scaling Ethereum")

track_tokens_usage(conversation, "Could you please elaborate more on sharding? Try to use at least 1000 words.")

track_tokens_usage(conversation, "What are the cons of sharding?")

track_tokens_usage(conversation, "How about state channels?")

track_tokens_usage(conversation, "Show me some popular state channel products")

# Dump the final memory buffer — for summary memory this is the condensed
# summary of the whole conversation, not the raw turn-by-turn transcript.
print(conversation.memory.buffer)
