# PP-API-v0.1 / app.py
# FastAPI service exposing /drive and /navigate chat-completion endpoints.
from fastapi import FastAPI, Request
from pydantic import BaseModel
import openai
import uvicorn
import requests
import re
import os
# FastAPI application instance served by uvicorn.
app = FastAPI()
# Chat model identifier.  NOTE(review): confirm this matches the model the
# endpoints are meant to call — verify against the chat-completion calls below.
MODEL = "meta-llama/Llama-3.3-70B-Instruct"
# Hugging Face token; read eagerly so a missing variable fails at import time
# (raises KeyError) rather than mid-request.
HF_TOKEN = os.environ["HF_TOKEN"]
# Hyperbolic API token used as the OpenAI-compatible API key below.
HB_TOKEN = os.environ["HB_TOKEN"]
# URL of the Google Doc that holds the prompt-template sections.
PROMPTS_DOC_URL = os.environ["PROMPTS"]
def fetch_prompts_from_google_doc():
    """Download the prompt document and parse its named sections.

    The document delimits each prompt with ``{BEGIN <KEY>}`` / ``{END <KEY>}``
    markers; the backreference in the regex guarantees the BEGIN and END keys
    match each other.

    Returns:
        dict[str, str]: mapping of section key to stripped section body.

    Raises:
        Exception: if the document cannot be fetched (non-200 status).
        requests.exceptions.Timeout: if the server does not answer in time.
    """
    print("Fetching prompts from Google Doc...")
    # Bound the request: without a timeout, requests waits forever and a hung
    # Google endpoint would stall every API request that triggers a fetch.
    response = requests.get(PROMPTS_DOC_URL, timeout=10)
    if response.status_code != 200:
        raise Exception("Failed to fetch document")
    pattern = r"\{BEGIN (.*?)\}([\s\S]*?)\{END \1\}"
    # Build the key -> content map in one pass over all matched sections.
    return {
        key.strip(): content.strip()
        for key, content in re.findall(pattern, response.text)
    }
class NavigatePrompt(BaseModel):
    # Request body for POST /navigate.
    message: str  # the user's chat message to the navigator
    code: str  # the code the message refers to, sent alongside it
class DrivePrompt(BaseModel):
    # Request body for POST /drive.
    instruction: str  # instruction telling the driver what to do
    code: str  # existing code the instruction applies to
@app.post("/drive")
async def drive(prompt: DrivePrompt):
    """Handle POST /drive: produce a driver-role LLM reply for an instruction.

    Args:
        prompt: request body carrying the instruction and the existing code.

    Returns:
        dict: ``{"reply": <stripped model output>}``.
    """
    # Re-fetched on every request so Google-Doc edits take effect immediately
    # (at the cost of one extra HTTP round trip per call).
    prompts = fetch_prompts_from_google_doc()
    print("Received POST to /drive")
    print("Instruction:", prompt.instruction)
    system_prompt = f"""
### Unit Information ###
{prompts.get('UNIT_INFORMATION_DRIVER', '')}
### Role Description ###
{prompts.get('ROLE_DESCRIPTION_DRIVER', '')}
### Behavioral Instructions ###
{prompts.get('BEHAVIORAL_INSTRUCTIONS_DRIVER', '')}
"""
    user_prompt = f"""
### Instruction ###
{prompt.instruction}
### Existing Code ###
{prompt.code}
"""
    # Hyperbolic exposes an OpenAI-compatible API, so the OpenAI client is
    # pointed at its base URL with the Hyperbolic token as the key.
    client = openai.OpenAI(
        api_key=HB_TOKEN,
        base_url="https://api.hyperbolic.xyz/v1",
    )
    chat_completion = client.chat.completions.create(
        # Use the module-level MODEL constant: the previous hard-coded
        # "meta-llama/Meta-Llama-3-70B-Instruct" silently diverged from it.
        model=MODEL,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        temperature=0.7,
        max_tokens=1024,
    )
    reply = chat_completion.choices[0].message.content.strip()
    print("Drive response:", reply)
    return {"reply": reply}
@app.post("/navigate")
async def chat(prompt: NavigatePrompt):
    """Handle POST /navigate: produce a navigator-role LLM reply to a message.

    Args:
        prompt: request body carrying the user's message and current code.

    Returns:
        dict: ``{"reply": <stripped model output>}``.
    """
    # Re-fetched on every request so Google-Doc edits take effect immediately
    # (at the cost of one extra HTTP round trip per call).
    prompts = fetch_prompts_from_google_doc()
    print("Received POST request")
    print("Message:", prompt.message)
    system_prompt = f"""
### Unit Information ###
{prompts.get('UNIT_INFORMATION_NAVIGATOR', '')}
### Role Description ###
{prompts.get('ROLE_DESCRIPTION_NAVIGATOR', '')}
### Topic Information ###
{prompts.get('TOPIC_INFORMATION_NAVIGATOR', '')}
### Task Description ###
{prompts.get('TASK_DESCRIPTION_NAVIGATOR', '')}
### Reference Solution ###
{prompts.get('REFERENCE_SOLUTION_NAVIGATOR', '')}
### Behavioral Instructions ###
{prompts.get('BEHAVIORAL_INSTRUCTIONS_NAVIGATOR', '')}
"""
    user_prompt = f"""
### Message ###
{prompt.message}
### Code ###
{prompt.code}
"""
    # Hyperbolic exposes an OpenAI-compatible API, so the OpenAI client is
    # pointed at its base URL with the Hyperbolic token as the key.
    client = openai.OpenAI(
        api_key=HB_TOKEN,
        base_url="https://api.hyperbolic.xyz/v1",
    )
    chat_completion = client.chat.completions.create(
        # Use the module-level MODEL constant: the previous hard-coded
        # "meta-llama/Meta-Llama-3-70B-Instruct" silently diverged from it.
        model=MODEL,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        temperature=0.7,
        max_tokens=1024,
    )
    text_response = chat_completion.choices[0].message.content
    print("Text generation done", text_response.strip())
    return {"reply": text_response.strip()}