from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import requests
import subprocess
print("Start Start:")
print("Start Start:")
print("Start Start:")
print("Start Start:")
print("Start Start:")
# Install ollama using the provided script
install_command = "curl https://ollama.ai/install.sh | sh"
install_result = subprocess.run(install_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
# Print the installation output
print("Installation Output:")
print(install_result.stdout)
# Print any installation errors
print("Installation Errors:")
print(install_result.stderr)
# Check the return code of the installation
if install_result.returncode == 0:
    # Installation was successful, now run the 'ollama pull mistral' command
    pull_command = "ollama pull mistral"
    pull_result = subprocess.run(pull_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)

    # Print the pull output, any errors, and the return code
    print("\nPull Output:")
    print(pull_result.stdout)
    print("Pull Errors:")
    print(pull_result.stderr)
    print("Pull Return Code:", pull_result.returncode)

    # Check if the pull command was successful before starting the server
    if pull_result.returncode == 0:
        # Start the server using 'ollama serve'. Note: 'ollama serve' blocks
        # until the server exits, so subprocess.run() would never return and
        # the FastAPI app below would never be defined. Launch it in the
        # background with Popen instead.
        serve_command = "ollama serve"
        serve_process = subprocess.Popen(serve_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        print("\nServer started in the background, PID:", serve_process.pid)
    else:
        print("Pull command failed. Aborting server start.")
else:
    print("Installation failed. Aborting pull and server start.")
def get_ollama_response(prompt):
    # Query the local Ollama server for a single, non-streamed completion
    response = requests.post(
        "http://localhost:11434/api/generate",
        json={
            "model": "mistral",
            "prompt": prompt,
            "stream": False
        }
    )
    resp = response.json()
    # Return the generated text and the number of response tokens
    return resp["response"], resp["eval_count"]
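# Example use (an illustration, not in the original; the field names follow
# Ollama's non-streaming /api/generate response, which also carries timing
# keys such as "total_duration" that are ignored here):
#   text, token_count = get_ollama_response("Why is the sky blue?")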
app = FastAPI(openapi_url="/api/v1/sparrow-data/openapi.json", docs_url="/api/v1/sparrow-data/docs")
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
    allow_credentials=True,
)
@app.post("/ask_llm")
def ask_llm_endpoint(prompt: str):
    result = get_ollama_response(prompt)
    return {"result": result}