from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import requests
import subprocess
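
# This script bootstraps a local LLM service: it installs Ollama, pulls the
# mistral model, starts the Ollama server, and exposes a FastAPI endpoint
# that forwards prompts to it.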

print("Starting Ollama setup:")

# Install Ollama via its official install script.
install_command = "curl https://ollama.ai/install.sh | sh"
install_result = subprocess.run(
    install_command,
    shell=True,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    text=True,
)

print("Installation Output:")
print(install_result.stdout)
print("Installation Errors:")
print(install_result.stderr)

if install_result.returncode == 0:
    pull_command = "ollama pull mistral"
    pull_result = subprocess.run(
        pull_command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )

    print("\nPull Output:")
    print(pull_result.stdout)
    print("Pull Errors:")
    print(pull_result.stderr)
    print("Pull Return Code:", pull_result.returncode)

    if pull_result.returncode == 0:
        # `ollama serve` runs until killed: subprocess.run would block here
        # forever and the FastAPI app below would never start, so launch the
        # server in the background with Popen instead. Output is discarded so
        # an unread pipe cannot stall the server.
        serve_command = "ollama serve"
        serve_process = subprocess.Popen(
            serve_command,
            shell=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        print("\nOllama server launched in the background (pid:", serve_process.pid, ")")
    else:
        print("Pull command failed. Aborting server start.")
else:
    print("Installation failed. Aborting pull and server start.")
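
# A minimal readiness sketch, not part of the original script: the server now
# starts in the background, so poll Ollama's HTTP endpoint until it answers to
# keep the first /api/generate call from racing the startup. The helper name
# `wait_for_ollama` is an assumption.
import time

def wait_for_ollama(base_url="http://localhost:11434", timeout=60):
    """Return True once the Ollama server answers over HTTP, False on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if requests.get(base_url, timeout=2).status_code == 200:
                return True
        except requests.exceptions.RequestException:
            pass  # server not up yet; retry below
        time.sleep(1)
    return False

wait_for_ollama()  # simply times out if the server was never launched above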


def get_ollama_response(prompt):
    # Query the local Ollama server; with "stream": False the API returns one
    # JSON object containing the full response.
    response = requests.post(
        "http://localhost:11434/api/generate",
        json={
            "model": "mistral",
            "prompt": prompt,
            "stream": False,
        },
    )
    response.raise_for_status()  # fail loudly instead of a KeyError below
    resp = response.json()
    return resp["response"], resp["eval_count"]
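
# Example usage (the prompt string is hypothetical):
#   text, eval_count = get_ollama_response("Why is the sky blue?")
# `eval_count` is Ollama's count of tokens generated for the response.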


app = FastAPI(openapi_url="/api/v1/sparrow-data/openapi.json", docs_url="/api/v1/sparrow-data/docs")

# Per the CORS spec, browsers reject credentialed requests when the allowed
# origin is the wildcard "*"; list explicit origins if allow_credentials
# must stay True.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
    allow_credentials=True,
)


@app.post("/ask_llm")
def ask_llm_endpoint(prompt: str):
    # `prompt` arrives as a query parameter; the result is a
    # (response_text, eval_count) pair.
    result = get_ollama_response(prompt)
    return {"result": result}
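
# Example request, assuming the app is served with uvicorn (the module name
# "main" is an assumption):
#   uvicorn main:app --port 8000
#   curl -X POST "http://localhost:8000/ask_llm?prompt=Hello"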