UjjwalPardeshi committed on
Commit ·
5b89fc4
1
Parent(s): dbae750
fix: test LLM proxy call before env connection to ensure proxy usage
Browse files- inference.py +9 -0
inference.py
CHANGED
|
@@ -197,6 +197,15 @@ async def main() -> None:
|
|
| 197 |
try:
|
| 198 |
client = OpenAI(base_url=API_BASE_URL, api_key=API_KEY)
|
| 199 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 200 |
# Connect to environment — same pattern as sample script
|
| 201 |
if IMAGE_NAME:
|
| 202 |
env = await GenericEnvClient.from_docker_image(IMAGE_NAME)
|
|
|
|
| 197 |
try:
|
| 198 |
client = OpenAI(base_url=API_BASE_URL, api_key=API_KEY)
|
| 199 |
|
| 200 |
+
# --- Verify LLM proxy works BEFORE connecting to env ---
|
| 201 |
+
print("[DEBUG] Testing LLM proxy connection...", flush=True)
|
| 202 |
+
test_resp = client.chat.completions.create(
|
| 203 |
+
model=MODEL_NAME,
|
| 204 |
+
messages=[{"role": "user", "content": "Say OK"}],
|
| 205 |
+
max_tokens=5,
|
| 206 |
+
)
|
| 207 |
+
print(f"[DEBUG] LLM proxy test OK: {test_resp.choices[0].message.content}", flush=True)
|
| 208 |
+
|
| 209 |
# Connect to environment — same pattern as sample script
|
| 210 |
if IMAGE_NAME:
|
| 211 |
env = await GenericEnvClient.from_docker_image(IMAGE_NAME)
|