# This is a sample Python script.

# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.


def print_hi(name):
    """Print a greeting for *name* to stdout."""
    greeting = f'Hi, {name}'  # Press ⌘F8 to toggle a breakpoint here while debugging.
    print(greeting)

def f1():
    """Load the local Mistral-7B GGUF model and print a short text completion."""
    from llama_cpp import Llama

    model = Llama(
        model_path="./mistralai2/mistral-7b-instruct-v0.2.Q8_0.gguf",
        n_threads=2,
        # n_gpu_layers=-1, # Uncomment to use GPU acceleration
        # seed=1337, # Uncomment to set a specific seed
        # n_ctx=2048, # Uncomment to increase the context window
    )
    # Generate a completion; calling the model is equivalent to create_completion().
    completion = model(
        "Q: Name the planets in the solar system? A: ",  # prompt
        max_tokens=32,      # cap generation length; None would run to the end of the context window
        stop=["Q:", "\n"],  # halt just before the model would start a new question
        echo=True,          # include the prompt in the returned text
    )
    print(completion)

# Invoke the model with a JSON response format
def f2():
    """Request a JSON-constrained chat completion from the local Mistral model.

    Uses ``response_format`` with a JSON schema so the model must emit a JSON
    object containing a ``team_name`` string.

    Returns:
        The completion dict produced by ``Llama.create_chat_completion``.
    """
    from llama_cpp import Llama

    # Fix: the original assigned this path with a stray trailing comma (making an
    # unused 1-tuple) and then re-hard-coded the same path; keep one variable.
    model_path = "./mistralai2/mistral-7b-instruct-v0.2.Q8_0.gguf"

    llm = Llama(model_path=model_path)
    # Fix: the original discarded the completion; return it so callers can use it
    # (backward-compatible — previous callers could only ignore the None result).
    return llm.create_chat_completion(
        messages=[
            {
                "role": "system",
                "content": "You are a helpful assistant that outputs in JSON.",
            },
            {"role": "user", "content": "Who won the world series in 2020"},
        ],
        response_format={
            "type": "json_object",
            "schema": {
                "type": "object",
                "properties": {"team_name": {"type": "string"}},
                "required": ["team_name"],
            },
        },
        temperature=0.7,
    )

# Press the green button in the gutter to run the script.
if __name__ == "__main__":
    # Script entry point: greet, then run the basic completion demo.
    print_hi("PyCharm")
    f1()


