File size: 3,504 Bytes
3f03ec9
 
aceae0f
3f03ec9
6dc93bc
 
3f03ec9
 
 
 
6dc93bc
3f03ec9
aceae0f
6dc93bc
3f03ec9
6dc93bc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8a039c9
3f03ec9
6dc93bc
3f03ec9
8a039c9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43c9d7c
 
 
 
 
8a039c9
 
 
 
 
 
 
 
 
 
 
 
 
 
845448c
8a039c9
6dc93bc
4427fcc
b0c020a
4427fcc
6dc93bc
 
8a039c9
6dc93bc
 
8a039c9
3f03ec9
6dc93bc
8a039c9
6dc93bc
 
8a039c9
6dc93bc
 
8a039c9
6dc93bc
 
8a039c9
6dc93bc
 
 
 
 
 
 
 
 
 
aceae0f
43c9d7c
 
aceae0f
 
 
 
 
 
 
6dc93bc
aceae0f
 
3f03ec9
aceae0f
 
4427fcc
6dc93bc
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
from fastapi import FastAPI, Request
from pydantic import BaseModel
import openai
import uvicorn
import requests
import re
import os

# FastAPI application exposing the /drive and /navigate LLM endpoints.
app = FastAPI()

# NOTE(review): MODEL is never referenced by the endpoints below — they
# hard-code "meta-llama/Meta-Llama-3-70B-Instruct" instead. Confirm which
# model is actually intended.
MODEL = "meta-llama/Llama-3.3-70B-Instruct"
HF_TOKEN = os.environ["HF_TOKEN"]  # Hugging Face token; unused in the visible code
HB_TOKEN = os.environ["HB_TOKEN"]  # Hyperbolic API key used for chat completions
PROMPTS_DOC_URL = os.environ["PROMPTS"]  # URL of the Google Doc holding prompt sections

def fetch_prompts_from_google_doc():
    """Download the prompts document and parse it into a dict.

    The document contains named sections delimited by ``{BEGIN KEY}`` /
    ``{END KEY}`` markers; each section becomes a ``prompts[KEY]`` entry
    with surrounding whitespace stripped from both key and content.

    Returns:
        dict[str, str]: mapping of section key to section text.

    Raises:
        Exception: if the document cannot be fetched (non-200 status).
        requests.RequestException: on network errors or timeout.
    """
    print("Fetching prompts from Google Doc...")
    # Fix: a timeout is required here — without one, a hung Google Docs
    # request would block every endpoint forever (both handlers call this
    # on each incoming request).
    response = requests.get(PROMPTS_DOC_URL, timeout=10)
    if response.status_code != 200:
        raise Exception("Failed to fetch document")

    # Non-greedy match between {BEGIN X} and its matching {END X}; the
    # backreference \1 ensures the begin/end keys agree.
    pattern = r"\{BEGIN (.*?)\}([\s\S]*?)\{END \1\}"
    matches = re.findall(pattern, response.text)

    return {key.strip(): content.strip() for key, content in matches}

class NavigatePrompt(BaseModel):
    # Request body for POST /navigate.
    message: str  # learner's chat message to the navigator
    code: str  # current editor code, sent along for context

class DrivePrompt(BaseModel):
    # Request body for POST /drive.
    instruction: str  # natural-language instruction for the driver
    code: str  # existing code the driver should work from

@app.post("/drive")
async def drive(prompt: DrivePrompt):
    """Handle a 'driver' request: combine the caller's instruction and
    existing code with the driver prompt sections from the Google Doc,
    and return the Hyperbolic chat completion as ``{"reply": ...}``."""
    # Prompts are re-fetched on every request so doc edits take effect live.
    doc_prompts = fetch_prompts_from_google_doc()
    print("Received POST to /drive")
    print("Instruction:", prompt.instruction)

    sys_prompt = f"""
    ### Unit Information ###
    {doc_prompts.get('UNIT_INFORMATION_DRIVER', '')}
    
    ### Role Description ###
    {doc_prompts.get('ROLE_DESCRIPTION_DRIVER', '')}
    
    ### Behavioral Instructions ###
    {doc_prompts.get('BEHAVIORAL_INSTRUCTIONS_DRIVER', '')}
    """

    usr_prompt = f"""
    ### Instruction ###
    {prompt.instruction}
    
    ### Existing Code ###
    {prompt.code}
    """

    # NOTE(review): a fresh client is built per request and the model string
    # differs from the module-level MODEL constant — confirm both are intended.
    llm = openai.OpenAI(
        base_url="https://api.hyperbolic.xyz/v1",
        api_key=HB_TOKEN,
    )

    conversation = [
        {"role": "system", "content": sys_prompt},
        {"role": "user", "content": usr_prompt},
    ]
    completion = llm.chat.completions.create(
        model="meta-llama/Meta-Llama-3-70B-Instruct",
        messages=conversation,
        temperature=0.7,
        max_tokens=1024,
    )

    answer = completion.choices[0].message.content.strip()
    print("Drive response:", answer)
    return {"reply": answer}

@app.post("/navigate")
async def chat(prompt: NavigatePrompt):
    """Handle a 'navigator' request: combine the learner's message and code
    with the navigator prompt sections from the Google Doc, and return the
    Hyperbolic chat completion as ``{"reply": ...}``."""
    # Prompts are re-fetched on every request so doc edits take effect live.
    doc_prompts = fetch_prompts_from_google_doc()
    print("Received POST request")
    print("Message:", prompt.message)

    sys_prompt = f"""
    ### Unit Information ###
    {doc_prompts.get('UNIT_INFORMATION_NAVIGATOR', '')}
    
    ### Role Description ###
    {doc_prompts.get('ROLE_DESCRIPTION_NAVIGATOR', '')}
    
    ### Topic Information ###
    {doc_prompts.get('TOPIC_INFORMATION_NAVIGATOR', '')}
    
    ### Task Description ###
    {doc_prompts.get('TASK_DESCRIPTION_NAVIGATOR', '')}
    
    ### Reference Solution ###
    {doc_prompts.get('REFERENCE_SOLUTION_NAVIGATOR', '')}
    
    ### Behavioral Instructions ###
    {doc_prompts.get('BEHAVIORAL_INSTRUCTIONS_NAVIGATOR', '')}
    """

    usr_prompt = f"""
    ### Message ###
    {prompt.message}
    
    ### Code ###
    {prompt.code}
    """

    # NOTE(review): a fresh client is built per request and the model string
    # differs from the module-level MODEL constant — confirm both are intended.
    llm = openai.OpenAI(
        base_url="https://api.hyperbolic.xyz/v1",
        api_key=HB_TOKEN,
    )

    conversation = [
        {"role": "system", "content": sys_prompt},
        {"role": "user", "content": usr_prompt},
    ]
    completion = llm.chat.completions.create(
        model="meta-llama/Meta-Llama-3-70B-Instruct",
        messages=conversation,
        temperature=0.7,
        max_tokens=1024,
    )

    answer = completion.choices[0].message.content.strip()
    print("Text generation done", answer)
    return {"reply": answer}