from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_community.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.prompts import PromptTemplate
from langchain_community.llms import LlamaCpp
from langchain_community.chat_models import ChatOllama

def one():
    """Minimal demo: send a single prompt to a local Ollama llama2 model.

    Requires a running Ollama server with the ``llama2`` model pulled.
    """
    model = Ollama(model="llama2")
    answer = model.invoke("hello")
    print(answer)

# langchain + ollama
def f2():
    """Chat-style demo: ask a local Ollama llama2 model who it is.

    Uses the chat interface (message objects) rather than the plain LLM
    string interface, then prints the model's reply text.
    """
    model = ChatOllama(model="llama2")
    reply = model.invoke([HumanMessage(content='who are you')])
    # reply is an AIMessage; its text lives on .content
    print(reply.content, end='\n')

# langchain + llama.cpp
# langchain + llama.cpp
def f3(question: str = "What is 1 + 1?"):
    """Run a chain-of-thought style prompt against a local llama.cpp model.

    Args:
        question: The question to substitute into the prompt template.
            Defaults to a trivial demo question, keeping the original
            zero-argument call ``f3()`` working.

    Streams tokens to stdout via the callback handler and also prints the
    final completion.
    """
    template = """Question: {question}

    Answer: Let's work this out in a step by step way to be sure we have the right answer."""

    prompt = PromptTemplate.from_template(template)
    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])

    # Make sure the model path is correct for your system!
    llm = LlamaCpp(
        model_path="/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin",
        temperature=0.75,
        max_tokens=2000,
        top_p=1,
        callback_manager=callback_manager,
        verbose=True,  # Verbose is required to pass to the callback manager
    )
    # BUG FIX: the original passed the PromptTemplate object itself to
    # invoke(), leaving {question} unfilled — LlamaCpp expects a prompt
    # string, so the template must be formatted first.
    res = llm.invoke(prompt.format(question=question))
    print(res)

# pip3 install jupyter
# jupyter notebook

def f5():
    """Ask a local Ollama llama2 model "who are you" and print the reply."""
    print(Ollama(model="llama2").invoke("who are you"))

# Script entry point: greet, then run the ChatOllama demo.
if __name__ == '__main__':
    print('hello')
    f2()



