davanstrien HF Staff Claude Opus 4.6 committed on
Commit
9642a35
·
1 Parent(s): 54f1ed0

Update ocr-vllm-judge.py: improved eval prompt + --save-results flag

Browse files

- Rewrite evaluation prompt to prioritize faithfulness over formatting,
heavily penalizing added commentary/interpretation
- Add --save-results flag to push judge results (comparisons, leaderboard,
metadata) to HF Hub dataset repo

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

Files changed (1) hide show
  1. ocr-vllm-judge.py +70 -5
ocr-vllm-judge.py CHANGED
@@ -101,11 +101,21 @@ def image_to_base64(image: Image.Image) -> str:
101
  # --- Judge prompt ---
102
  PAIRWISE_PROMPT = """You are an expert OCR quality evaluator. You are given a document image and TWO OCR outputs (A and B) extracted from that same image.
103
 
104
- Compare them and decide which extraction is better overall. Consider:
105
- - Accuracy: correct characters, no hallucinations
106
- - Completeness: all text captured
107
- - Formatting: clean structure (ignore bounding box tags like <|ref|> <|det|> if present)
108
- - Reading order: natural flow
 
 
 
 
 
 
 
 
 
 
109
 
110
  Output A:
111
  ---
@@ -680,6 +690,11 @@ Examples:
680
  default=0.85,
681
  help="vLLM GPU memory fraction (default: 0.85)",
682
  )
 
 
 
 
 
683
  args = parser.parse_args()
684
 
685
  # --- CUDA check ---
@@ -753,6 +768,56 @@ Examples:
753
  )
754
  print_elo_leaderboard(elo, wins, losses, ties)
755
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
756
  # --- Sample comparisons ---
757
  console.print("\n[bold]Sample comparisons:[/bold]")
758
  for entry in comparison_log[:5]:
 
101
  # --- Judge prompt ---
102
  PAIRWISE_PROMPT = """You are an expert OCR quality evaluator. You are given a document image and TWO OCR outputs (A and B) extracted from that same image.
103
 
104
+ Compare them and decide which extraction is better overall.
105
+
106
+ Evaluation criteria (in priority order):
107
+
108
+ 1. Faithfulness: The output must ONLY contain text from the document. Any added commentary, interpretation, or notes (e.g. "it appears the text says...", "the document contains...") is a serious error. Penalize heavily.
109
+
110
+ 2. Completeness: ALL visible text must be captured — headers, footers, marginalia, stamps, handwritten notes. Missing any section of text is a significant penalty.
111
+
112
+ 3. Accuracy: Correct characters, no hallucinated words or garbled text.
113
+
114
+ 4. Reading order: Text flows naturally as a human would read the document.
115
+
116
+ 5. Formatting: Clean structure. Ignore bounding box tags like <|ref|> <|det|> if present. Do NOT prefer fancier markdown formatting — plain accurate text is better than nicely formatted but incomplete text.
117
+
118
+ If both outputs capture the same text with similar accuracy, respond with "tie". Only pick a winner when there is a clear quality difference.
119
 
120
  Output A:
121
  ---
 
690
  default=0.85,
691
  help="vLLM GPU memory fraction (default: 0.85)",
692
  )
693
+ parser.add_argument(
694
+ "--save-results",
695
+ default=None,
696
+ help="Push judge results to this HF dataset repo (e.g. davanstrien/ocr-bench-rubenstein-judge)",
697
+ )
698
  args = parser.parse_args()
699
 
700
  # --- CUDA check ---
 
768
  )
769
  print_elo_leaderboard(elo, wins, losses, ties)
770
 
771
+ # --- Save results ---
772
+ if args.save_results:
773
+ import datetime
774
+
775
+ console.print(f"\n[bold]Saving results to:[/bold] {args.save_results}")
776
+
777
+ # Comparisons config
778
+ comp_ds = Dataset.from_list(comparison_log)
779
+ comp_ds.push_to_hub(args.save_results, config_name="comparisons")
780
+ console.print(f" Pushed [cyan]comparisons[/cyan] ({len(comparison_log)} rows)")
781
+
782
+ # Leaderboard config
783
+ ranked = sorted(elo.items(), key=lambda x: x[1], reverse=True)
784
+ leaderboard_rows = []
785
+ for model, rating in ranked:
786
+ total = wins[model] + losses[model] + ties[model]
787
+ leaderboard_rows.append(
788
+ {
789
+ "model": model,
790
+ "elo": round(rating),
791
+ "wins": wins[model],
792
+ "losses": losses[model],
793
+ "ties": ties[model],
794
+ "win_pct": round(wins[model] / total * 100) if total > 0 else 0,
795
+ }
796
+ )
797
+ Dataset.from_list(leaderboard_rows).push_to_hub(
798
+ args.save_results, config_name="leaderboard"
799
+ )
800
+ console.print(
801
+ f" Pushed [cyan]leaderboard[/cyan] ({len(leaderboard_rows)} rows)"
802
+ )
803
+
804
+ # Metadata config
805
+ metadata_row = {
806
+ "source_dataset": args.dataset,
807
+ "judge_models": json.dumps(judge_models),
808
+ "seed": args.seed,
809
+ "max_samples": args.max_samples or len(ds),
810
+ "total_comparisons": len(comparisons),
811
+ "valid_comparisons": len(comparison_log),
812
+ "timestamp": datetime.datetime.now(datetime.timezone.utc).isoformat(),
813
+ "from_prs": args.from_prs,
814
+ }
815
+ Dataset.from_list([metadata_row]).push_to_hub(
816
+ args.save_results, config_name="metadata"
817
+ )
818
+ console.print(" Pushed [cyan]metadata[/cyan]")
819
+ console.print(f" [green]Results saved to: {args.save_results}[/green]")
820
+
821
  # --- Sample comparisons ---
822
  console.print("\n[bold]Sample comparisons:[/bold]")
823
  for entry in comparison_log[:5]: