bhsinghgrid committed on
Commit
e9ba070
·
verified ·
1 Parent(s): 483e2dc

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +53 -26
app.py CHANGED
@@ -88,11 +88,15 @@ def _mlflow_event(run_name: str, params: dict | None = None, metrics: dict | Non
88
 
89
  def _build_flow_markdown(model_loaded=False, inference_ready=False, task_states=None):
90
  lines = ["### Execution Flow"]
 
 
91
  for i, step in enumerate(FLOW_STEPS, start=1):
92
  status = "⬜"
93
  if model_loaded and i <= 3:
94
  status = "✅"
95
- if inference_ready and i <= 11:
 
 
96
  status = "✅"
97
  lines.append(f"{status} {i}. {step}")
98
  if task_states:
@@ -477,9 +481,17 @@ def _run_analysis_cmd(task, ckpt_path, output_dir, input_text="dharmo rakṣati
477
  env.setdefault("HF_DATASETS_CACHE", "/tmp/hf_datasets")
478
  env.setdefault("HF_HUB_CACHE", "/tmp/hf_hub")
479
 
480
- proc = subprocess.run(cmd, capture_output=True, text=True, env=env)
481
- log = f"$ {' '.join(cmd)}\n\n{proc.stdout}\n{proc.stderr}"
482
- return proc.returncode, log, False
 
 
 
 
 
 
 
 
483
 
484
 
485
  def _bundle_task_outputs(model_bundle, output_dir):
@@ -648,24 +660,34 @@ def _bg_worker(job_id: str, model_bundle, output_dir: str, input_text: str, task
648
  "updated": datetime.now().isoformat(),
649
  }
650
  )
651
- code, log, used_bundled = _run_analysis_cmd(
652
- task,
653
- model_bundle["ckpt_path"],
654
- output_dir,
655
- input_text,
656
- task4_phase,
657
- task5_cfg.get("samples", 50),
658
- )
659
- logs.append(f"\n\n{'='*22} TASK {task} {'='*22}\n{log}")
660
- if code != 0:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
661
  failures += 1
662
  _BG_JOBS[job_id]["task_states"][task] = "failed"
663
- logs.append(f"\n[Live fallback]\n{_live_task_analysis(model_bundle, task, input_text, task5_cfg)}\n")
664
- elif used_bundled:
665
- _BG_JOBS[job_id]["task_states"][task] = "done(bundled)"
666
- logs.append(f"\n[Live bundled summary]\n{_live_task_analysis(model_bundle, task, input_text, task5_cfg)}\n")
667
- else:
668
- _BG_JOBS[job_id]["task_states"][task] = "done"
669
  _BG_JOBS[job_id].update(
670
  {
671
  "log": "".join(logs),
@@ -740,7 +762,7 @@ def start_run_all_background(model_bundle, output_dir, input_text, task4_phase,
740
  daemon=True,
741
  )
742
  th.start()
743
- flow = _build_flow_markdown(model_loaded=True, inference_ready=False, task_states=_BG_JOBS[job_id]["task_states"])
744
  return f"Background run started. Job ID: {job_id}", f"Job {job_id} queued...", job_id, _BG_JOBS[job_id]["task_states"], flow
745
 
746
 
@@ -756,7 +778,7 @@ def poll_run_all_background(job_id, output_dir):
756
  f"failures={j['failures']} | updated={j['updated']}"
757
  )
758
  outputs = refresh_task_outputs(output_dir)
759
- flow = _build_flow_markdown(model_loaded=True, inference_ready=False, task_states=j.get("task_states", {}))
760
  return status, j.get("log", ""), j.get("task_states", {}), flow, *outputs
761
 
762
 
@@ -778,9 +800,14 @@ def run_single_task(model_bundle, task, output_dir, input_text, task4_phase, tas
778
  elapsed = (time.perf_counter() - t0) * 1000.0
779
  if code != 0:
780
  _bundle_task_outputs(model_bundle, output_dir)
781
- log = f"{log}\n\n--- Live task analysis ---\n{_live_task_analysis(model_bundle, task, input_text, task5_cfg)}"
782
- status = f"Task {task} fallback mode: bundled reports + live input analysis."
783
- task_states[str(task)] = "failed"
 
 
 
 
 
784
  else:
785
  if used_bundled:
786
  _bundle_task_outputs(model_bundle, output_dir)
@@ -805,7 +832,7 @@ def run_single_task(model_bundle, task, output_dir, input_text, task4_phase, tas
805
  },
806
  tags={"source": "hf_space", "mode": "single_task"},
807
  )
808
- flow = _build_flow_markdown(model_loaded=True, inference_ready=False, task_states=task_states)
809
  return status, log, task_states, flow
810
 
811
 
 
88
 
89
  def _build_flow_markdown(model_loaded=False, inference_ready=False, task_states=None):
90
  lines = ["### Execution Flow"]
91
+ task_states = task_states or {}
92
+ any_task_activity = any(v != "pending" for v in task_states.values()) if task_states else False
93
  for i, step in enumerate(FLOW_STEPS, start=1):
94
  status = "⬜"
95
  if model_loaded and i <= 3:
96
  status = "✅"
97
+ if (inference_ready or model_loaded) and i <= 11:
98
+ status = "✅"
99
+ if i == 12 and any_task_activity:
100
  status = "✅"
101
  lines.append(f"{status} {i}. {step}")
102
  if task_states:
 
481
  env.setdefault("HF_DATASETS_CACHE", "/tmp/hf_datasets")
482
  env.setdefault("HF_HUB_CACHE", "/tmp/hf_hub")
483
 
484
+ timeout_map = {"1": 120, "2": 180, "3": 240, "4": 300, "5": 240}
485
+ timeout_s = int(os.environ.get("TASK_TIMEOUT_S", timeout_map.get(str(task), 180)))
486
+ try:
487
+ proc = subprocess.run(cmd, capture_output=True, text=True, env=env, timeout=timeout_s)
488
+ log = f"$ {' '.join(cmd)}\n\n{proc.stdout}\n{proc.stderr}"
489
+ return proc.returncode, log, False
490
+ except subprocess.TimeoutExpired as e:
491
+ out = e.stdout or ""
492
+ err = e.stderr or ""
493
+ log = f"$ {' '.join(cmd)}\n\n[timeout after {timeout_s}s]\n{out}\n{err}"
494
+ return 124, log, False
495
 
496
 
497
  def _bundle_task_outputs(model_bundle, output_dir):
 
660
  "updated": datetime.now().isoformat(),
661
  }
662
  )
663
+ try:
664
+ code, log, used_bundled = _run_analysis_cmd(
665
+ task,
666
+ model_bundle["ckpt_path"],
667
+ output_dir,
668
+ input_text,
669
+ task4_phase,
670
+ task5_cfg.get("samples", 50),
671
+ )
672
+ logs.append(f"\n\n{'='*22} TASK {task} {'='*22}\n{log}")
673
+ if code != 0:
674
+ failures += 1
675
+ try:
676
+ logs.append(f"\n[Live fallback]\n{_live_task_analysis(model_bundle, task, input_text, task5_cfg)}\n")
677
+ _BG_JOBS[job_id]["task_states"][task] = "done(live-fast)"
678
+ except Exception as live_e:
679
+ _BG_JOBS[job_id]["task_states"][task] = "failed"
680
+ logs.append(f"\n[Live fallback failed]\n{live_e}\n")
681
+ elif used_bundled:
682
+ _BG_JOBS[job_id]["task_states"][task] = "done(bundled)"
683
+ logs.append(f"\n[Live bundled summary]\n{_live_task_analysis(model_bundle, task, input_text, task5_cfg)}\n")
684
+ else:
685
+ _BG_JOBS[job_id]["task_states"][task] = "done"
686
+ except Exception as e:
687
  failures += 1
688
  _BG_JOBS[job_id]["task_states"][task] = "failed"
689
+ logs.append(f"\n\n{'='*22} TASK {task} {'='*22}\n[worker exception]\n{e}\n")
690
+ code, used_bundled = 1, False
 
 
 
 
691
  _BG_JOBS[job_id].update(
692
  {
693
  "log": "".join(logs),
 
762
  daemon=True,
763
  )
764
  th.start()
765
+ flow = _build_flow_markdown(model_loaded=True, inference_ready=True, task_states=_BG_JOBS[job_id]["task_states"])
766
  return f"Background run started. Job ID: {job_id}", f"Job {job_id} queued...", job_id, _BG_JOBS[job_id]["task_states"], flow
767
 
768
 
 
778
  f"failures={j['failures']} | updated={j['updated']}"
779
  )
780
  outputs = refresh_task_outputs(output_dir)
781
+ flow = _build_flow_markdown(model_loaded=True, inference_ready=True, task_states=j.get("task_states", {}))
782
  return status, j.get("log", ""), j.get("task_states", {}), flow, *outputs
783
 
784
 
 
800
  elapsed = (time.perf_counter() - t0) * 1000.0
801
  if code != 0:
802
  _bundle_task_outputs(model_bundle, output_dir)
803
+ try:
804
+ log = f"{log}\n\n--- Live task analysis ---\n{_live_task_analysis(model_bundle, task, input_text, task5_cfg)}"
805
+ status = f"Task {task} fallback mode: bundled reports + live input analysis."
806
+ task_states[str(task)] = "done(live-fast)"
807
+ except Exception as e:
808
+ log = f"{log}\n\n--- Live task analysis failed ---\n{e}"
809
+ status = f"Task {task} failed (and live fallback failed)."
810
+ task_states[str(task)] = "failed"
811
  else:
812
  if used_bundled:
813
  _bundle_task_outputs(model_bundle, output_dir)
 
832
  },
833
  tags={"source": "hf_space", "mode": "single_task"},
834
  )
835
+ flow = _build_flow_markdown(model_loaded=True, inference_ready=True, task_states=task_states)
836
  return status, log, task_states, flow
837
 
838