from langchain import hub, LLMChain
##from langchain.chat_models import ChatOpenAI
from langchain_ollama import ChatOllama
#from keys import OPENAI_API_KEY
import os
import json

#os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY

# Pull the community "assumption-checker" prompt template from the LangChain Hub.
assumption_template = hub.pull("smithing-gold/assumption-checker")
print("assumption:")
#print(assumption_template)
# The pulled template is a pydantic model object, not plain JSON data, so a
# bare json.dumps() raises TypeError; default=str falls back to its string
# form instead of crashing the script before the chat loop starts.
print(json.dumps(assumption_template, indent=2, default=str))
print("*8888888888888888888")

#llm = ChatOpenAI()
# Local Ollama endpoint; override the hard-coded LAN address via the
# OLLAMA_BASE_URL environment variable (default preserves prior behavior).
llm = ChatOllama(
    base_url=os.environ.get("OLLAMA_BASE_URL", "http://192.168.99.142:11434"),
    model="qwen2.5-coder:latest",
)

# Chain the hub prompt with the local model; `llm_chain` is used by the REPL below.
llm_chain = LLMChain(llm=llm, prompt=assumption_template)

# Interactive REPL: forward each question through the chain until the user
# quits or closes stdin.
while True:
    try:
        question = input("What is your question ? Type quit to leave the chat.\n\n")
    except (EOFError, KeyboardInterrupt):
        # Ctrl-D / Ctrl-C at the prompt should end the chat, not traceback.
        break

    # Accept "quit" in any case and with surrounding whitespace.
    if question.strip().lower() == 'quit':
        break

    print("\n Response:")
    response = llm_chain.invoke(question)

    print(response)