"""Query a locally running Ollama mistral:instruct model and print its reply.

Prerequisites (make sure mistral:instruct is served by Ollama):
  > docker start ollama
  > docker exec -it ollama ollama run mistral:instruct
"""

import sys

import requests

# Ollama's HTTP API endpoint for one-shot text generation.
OLLAMA_URL = "http://ollama:11434/api/generate"
MODEL = "mistral:instruct"


def generate(prompt: str, url: str = OLLAMA_URL, model: str = MODEL,
             timeout: float = 120.0) -> requests.Response:
    """POST *prompt* to the Ollama generate endpoint and return the response.

    ``"stream": False`` makes Ollama send one complete JSON object instead of
    its default stream of newline-delimited chunks, so ``response.text`` is a
    single parseable document.

    Raises requests.RequestException on connection/timeout failures.
    """
    return requests.post(
        url,
        json={"model": model, "prompt": prompt, "stream": False},
        # requests has NO default timeout — without this the call can hang
        # forever when the Ollama container is not running.
        timeout=timeout,
    )


def main() -> None:
    """Send a fixed prompt and print the HTTP status code and response body."""
    print("Hello, Mistral!")
    try:
        response = generate("who are you")
    except requests.RequestException as exc:
        # e.g. the container is stopped or the hostname does not resolve.
        sys.exit(f"Request to Ollama failed: {exc}")
    print(response.status_code)
    print(response.text)


if __name__ == "__main__":
    main()