NeerajCodz committed on
Commit
f6b54cb
·
1 Parent(s): 715b529

fix: inference.py

Browse files
Files changed (1) hide show
  1. inference.py +66 -21
inference.py CHANGED
@@ -10,24 +10,61 @@ from typing import Any, Protocol
10
  from urllib import error as url_error
11
  from urllib import request as url_request
12
 
13
- from openai import OpenAI
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
 
16
  # Required hackathon configuration variables
17
- API_BASE_URL = os.getenv("API_BASE_URL", "https://api.openai.com/v1")
18
- MODEL_NAME = os.getenv("MODEL_NAME", "gpt-4.1-mini")
19
  HF_TOKEN = os.getenv("HF_TOKEN")
20
 
21
  # Optional runtime variables for local/OpenEnv execution
22
- ENV_API_BASE_URL = os.getenv("ENV_API_BASE_URL", "http://localhost:8000/api")
23
- TASK_NAME_DEFAULT = os.getenv("TASK_NAME", "task_001")
24
- BENCHMARK_DEFAULT = os.getenv("BENCHMARK", "openenv")
25
- MAX_STEPS_DEFAULT = int(os.getenv("MAX_STEPS", "12"))
26
- EPISODE_SEED_DEFAULT = int(os.getenv("EPISODE_SEED", "42"))
27
- LLM_TEMPERATURE = float(os.getenv("LLM_TEMPERATURE", "0.0"))
28
- PROMPT_HTML_LIMIT = int(os.getenv("PROMPT_HTML_LIMIT", "5000"))
29
- REQUEST_TIMEOUT_SECONDS = float(os.getenv("REQUEST_TIMEOUT_SECONDS", "30"))
30
- USE_OPENENV_SDK = os.getenv("USE_OPENENV_SDK", "true").lower() in {"1", "true", "yes", "on"}
31
 
32
 
33
  @dataclass
@@ -243,7 +280,7 @@ def _build_llm_prompt(
243
 
244
 
245
  def _llm_next_action(
246
- client: OpenAI,
247
  task_name: str,
248
  benchmark: str,
249
  observation: dict[str, Any],
@@ -466,6 +503,8 @@ def run_inference(task_name: str, benchmark: str, max_steps: int, seed: int, env
466
  if HF_TOKEN is None:
467
  raise ValueError("HF_TOKEN environment variable is required")
468
 
 
 
469
  client = OpenAI(base_url=API_BASE_URL, api_key=HF_TOKEN)
470
  adapter = _build_adapter(benchmark=benchmark, env_api_base_url=env_api_base_url)
471
  observation, info = adapter.reset(task_name=task_name, seed=seed)
@@ -528,12 +567,18 @@ def parse_args() -> argparse.Namespace:
528
 
529
 
530
  if __name__ == "__main__":
531
- args = parse_args()
532
- exit_code = run_inference(
533
- task_name=args.task,
534
- benchmark=args.benchmark,
535
- max_steps=args.max_steps,
536
- seed=args.seed,
537
- env_api_base_url=args.env_api_base_url,
538
- )
 
 
 
 
 
 
539
  sys.exit(exit_code)
 
10
  from urllib import error as url_error
11
  from urllib import request as url_request
12
 
13
+ def _env_str(name: str, default: str) -> str:
14
+ value = os.getenv(name)
15
+ if value is None:
16
+ return default
17
+ cleaned = value.strip()
18
+ return cleaned if cleaned else default
19
+
20
+
21
+ def _env_int(name: str, default: int) -> int:
22
+ value = os.getenv(name)
23
+ if value is None:
24
+ return default
25
+ try:
26
+ return int(value.strip())
27
+ except Exception:
28
+ return default
29
+
30
+
31
+ def _env_float(name: str, default: float) -> float:
32
+ value = os.getenv(name)
33
+ if value is None:
34
+ return default
35
+ try:
36
+ return float(value.strip())
37
+ except Exception:
38
+ return default
39
+
40
+
41
+ def _env_bool(name: str, default: bool) -> bool:
42
+ value = os.getenv(name)
43
+ if value is None:
44
+ return default
45
+ cleaned = value.strip().lower()
46
+ if cleaned in {"1", "true", "yes", "on"}:
47
+ return True
48
+ if cleaned in {"0", "false", "no", "off"}:
49
+ return False
50
+ return default
51
 
52
 
53
# Required hackathon configuration variables.
# API_BASE_URL / MODEL_NAME configure the OpenAI-compatible client; HF_TOKEN is
# read verbatim with no default — it is validated later (run_inference raises
# ValueError when it is None).
API_BASE_URL = _env_str("API_BASE_URL", "https://api.openai.com/v1")
MODEL_NAME = _env_str("MODEL_NAME", "gpt-4.1-mini")
HF_TOKEN = os.getenv("HF_TOKEN")

# Optional runtime variables for local/OpenEnv execution.
# All use the _env_* helpers, so blank or malformed values fall back to these
# defaults instead of raising at import time.
ENV_API_BASE_URL = _env_str("ENV_API_BASE_URL", "http://localhost:8000/api")
TASK_NAME_DEFAULT = _env_str("TASK_NAME", "task_001")
BENCHMARK_DEFAULT = _env_str("BENCHMARK", "openenv")
MAX_STEPS_DEFAULT = _env_int("MAX_STEPS", 12)
EPISODE_SEED_DEFAULT = _env_int("EPISODE_SEED", 42)
LLM_TEMPERATURE = _env_float("LLM_TEMPERATURE", 0.0)
# presumably a cap on HTML characters included in the LLM prompt — TODO confirm
PROMPT_HTML_LIMIT = _env_int("PROMPT_HTML_LIMIT", 5000)
REQUEST_TIMEOUT_SECONDS = _env_float("REQUEST_TIMEOUT_SECONDS", 30.0)
USE_OPENENV_SDK = _env_bool("USE_OPENENV_SDK", True)
68
 
69
 
70
  @dataclass
 
280
 
281
 
282
  def _llm_next_action(
283
+ client: Any,
284
  task_name: str,
285
  benchmark: str,
286
  observation: dict[str, Any],
 
503
  if HF_TOKEN is None:
504
  raise ValueError("HF_TOKEN environment variable is required")
505
 
506
+ from openai import OpenAI
507
+
508
  client = OpenAI(base_url=API_BASE_URL, api_key=HF_TOKEN)
509
  adapter = _build_adapter(benchmark=benchmark, env_api_base_url=env_api_base_url)
510
  observation, info = adapter.reset(task_name=task_name, seed=seed)
 
567
 
568
 
569
if __name__ == "__main__":
    # Default to failure; only a clean run_inference overwrites this.
    status = 1
    try:
        cli = parse_args()
        status = run_inference(
            task_name=cli.task,
            benchmark=cli.benchmark,
            max_steps=cli.max_steps,
            seed=cli.seed,
            env_api_base_url=cli.env_api_base_url,
        )
    except Exception:
        # Last-resort guard: never allow an unhandled exception to escape.
        # Emit the start/end markers so downstream tooling still sees a
        # complete (failed) episode, then exit non-zero.
        _emit_start(task_name=TASK_NAME_DEFAULT, benchmark=BENCHMARK_DEFAULT, model_name=MODEL_NAME)
        _emit_end(success=False, steps=0, rewards=[])
        status = 1
    sys.exit(status)