Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- draw/images/alluvial_diagram.html +0 -0
- draw/tools/annotator_stat.py +131 -0
- draw/tools/bon_line.py +284 -0
- draw/tools/context_requirement_bar.py +134 -0
- draw/tools/data_source_distribution.py +207 -0
- draw/tools/fitst_image.py +133 -0
- draw/tools/length_heatmap.py +131 -0
- draw/tools/passn_line.py +380 -0
- draw/tools/similarity_scatter_3d.py +214 -0
- draw/tools/strategy_dimension.py +211 -0
- draw/tools/strategy_time.py +85 -0
- draw/tools/task_alluvial_diagram.py +91 -0
- draw/tools/task_distrubution.py +276 -0
- draw/tools/task_radar.py +154 -0
- draw/tools/text_source_compare.py +241 -0
- eval/model/Qwen3-Embedding-8B/1 +0 -0
- eval/model/Tokenizers/claude/tokenizer.json +0 -0
- eval/model/Tokenizers/claude/tokenizer_config.json +9 -0
- eval/model/Tokenizers/gemini/tokenizer_config.json +0 -0
- eval/model/Tokenizers/gpt/tokenizer_config.json +183 -0
- eval/model/Tokenizers/qwen/tokenizer.json +0 -0
- eval/model/Tokenizers/qwen/tokenizer_config.json +207 -0
- eval/modules/__init__.py +6 -0
- eval/modules/data_loader.py +141 -0
- eval/modules/evaluation.py +219 -0
- eval/modules/inference.py +72 -0
- eval/modules/model_manager.py +769 -0
- eval/modules/utils.py +246 -0
- eval/output/Claude-3.7-Sonnet/nonthinking_context-120000_bon-3_summary.json +164 -0
- eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_inference_1-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_inference_2-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_inference_4-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_inference_5-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_inference_7-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_inference_8-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_summary.json +164 -0
- eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_1-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_2-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_3-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_4-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_5-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_6-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_7-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_8-of-8.jsonl +0 -0
- eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_summary.json +164 -0
- eval/output/GPT-5/thinking_context-262144_bon-3_summary.json +164 -0
- eval/output/Gemini-3-Pro/thinking_context-1000000_bon-3_summary_1.json +164 -0
- eval/output/Qwen3-32B/thinking_context-120000_bon-3_summary.json +164 -0
- eval/output/Qwen3-8B/nonthinking_context-120000_bon-3_summary.json +164 -0
.gitattributes
CHANGED
|
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
toolbox/model_predict/data/doc/30-殷勇代表:把群众冷暖当作头等大事.docx filter=lfs diff=lfs merge=lfs -text
|
draw/images/alluvial_diagram.html
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
draw/tools/annotator_stat.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Render annotator demographic statistics as a 2x3 grid of pie charts.

Reconstructed from the diff view of draw/tools/annotator_stat.py.
Fixes vs. original: bare ``except:`` narrowed to ``except Exception:``
(consistent with bon_line.py; a bare except also swallows
KeyboardInterrupt/SystemExit), and the stale "0.85" comment corrected to
match the actual 0.88 push-out radius.
"""
import matplotlib.pyplot as plt
from matplotlib import font_manager as fm
import numpy as np
import os

# --- Style Configuration from strategy_time.py ---
plt.style.use('seaborn-v0_8-whitegrid')
font_path = '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf'
font_bold_path = '/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'
try:
    fm.fontManager.addfont(font_path)
    fm.fontManager.addfont(font_bold_path)
    plt.rcParams['font.family'] = 'Arial'
except Exception:
    pass  # Fallback if fonts not found

plt.rcParams['font.weight'] = 'bold'
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titleweight'] = 'bold'
plt.rcParams['font.size'] = 12

# Colors - Refined Palette for better aesthetics
# Using a professional palette (e.g., inspired by AntV or similar high-quality viz libraries)
extended_colors = [
    '#5B8FF9',  # Blue
    '#5AD8A6',  # Green
    '#5D7092',  # Grey Blue
    '#F6BD16',  # Yellow
    '#E8684A',  # Red/Orange
    '#6DC8EC',  # Light Blue
    '#9270CA',  # Purple
    '#FF9D4D',  # Orange
    '#269A99',  # Teal
    '#FF99C3',  # Pink
    '#A092F1',  # Light Purple
    '#BDD2FD',  # Pale Blue
]

# --- Data ---
# One entry per demographic dimension; each pie uses its own totals.
# NOTE(review): "Role" values sum to 63 while most others sum to 30 —
# presumably different survey populations; confirm against the raw stats.
data = {
    "Role": {
        "labels": ["General Annotator", "Long-context Expert"],
        "values": [51, 12]
    },
    "Age": {
        "labels": ["23-24", "25-26", "27-28", "29-30", "31-32"],
        "values": [6, 12, 7, 3, 2]
    },
    "Gender": {
        "labels": ["Male", "Female"],
        "values": [15, 15]
    },
    "Major": {
        "labels": ["CS", "Electronic Info", "Industrial Eng", "Economics",
                   "Management", "Literature", "Arts", "Agriculture",
                   "Chemistry", "Psychology", "Law"],
        "values": [10, 3, 2, 2, 3, 4, 2, 1, 1, 1, 1]
    },
    "Experience": {
        "labels": ["<0.5 years", "0.5-1 years", "1-2 years", ">2 years"],
        "values": [1, 13, 10, 6]
    },
    "Education": {
        "labels": ["Bachelor", "Master", "PhD"],
        "values": [23, 3, 4]
    }
}

# Create figure with 2 rows, 3 columns
fig, axes = plt.subplots(2, 3, figsize=(14, 8))
axes = axes.flatten()

# Plot each pie chart
for i, (title, content) in enumerate(data.items()):
    ax = axes[i]
    labels = content["labels"]
    values = content["values"]

    # Use extended colors
    current_colors = extended_colors[:len(labels)]

    # Calculate percentages manually to handle placement logic
    total_val = sum(values)

    wedges, texts, autotexts = ax.pie(
        values,
        labels=labels,
        autopct='%1.1f%%',
        startangle=90,
        colors=current_colors,
        wedgeprops={'linewidth': 1.2, 'edgecolor': 'white'},
        textprops={'fontsize': 10, 'fontweight': 'bold', 'family': 'Arial'},
        pctdistance=0.75
    )

    # Style the percentage text
    for j, autotext in enumerate(autotexts):
        percent = values[j] / total_val
        autotext.set_color('white')
        autotext.set_weight('bold')

        # Special handling for small slices to avoid overlap
        if percent < 0.05:  # For slices < 5%
            autotext.set_fontsize(8)  # Smaller font
            # Move slightly outward
            x, y = autotext.get_position()
            # Calculate direction vector from center (0,0)
            norm = np.sqrt(x*x + y*y)
            if norm > 0:
                # Push out from 0.75 radius to 0.88 for small items
                new_x = x / norm * 0.88
                new_y = y / norm * 0.88
                autotext.set_position((new_x, new_y))
        else:
            autotext.set_fontsize(10)

    # Style the labels
    for text in texts:
        text.set_weight('bold')
        text.set_color('#555555')

    ax.set_title(title, fontsize=20, fontweight='bold', pad=10, fontfamily='Arial')

plt.tight_layout()
save_path = '/cpfs/user/chenziyang/LongBenchmark/draw/images/annotator_stat_pie.png'
# Ensure directory exists
os.makedirs(os.path.dirname(save_path), exist_ok=True)

fig.savefig(save_path, dpi=300, bbox_inches='tight')
print(f"Saved to {save_path}")
|
draw/tools/bon_line.py
ADDED
|
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Plot Best-of-N score curves (3 points per model) across 7 subplots.

Reconstructed from the diff view of draw/tools/bon_line.py.
Fixes vs. original: the identical ``FontProperties`` object was rebuilt
inside the per-subplot loop (hoisted — loop-invariant); the legend-color
loop used an unused ``enumerate`` index and a brittle direct
``model_style[...]`` lookup that would raise ``KeyError`` if a legend
label were ever missing from the palette (now ``.get`` with a black
fallback). Chinese comments translated to English.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import font_manager as fm
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import FixedLocator

# Style setup
plt.style.use('seaborn-v0_8-whitegrid')
font_path = '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf'
font_bold_path = '/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'
try:
    fm.fontManager.addfont(font_path)
    fm.fontManager.addfont(font_bold_path)
except Exception:
    pass
plt.rcParams['font.family'] = 'Arial'
plt.rcParams['font.weight'] = 'bold'
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titleweight'] = 'bold'

# Each model has 3 data points (N = 1, 2, 3)
num_points = 3
x = np.arange(1, num_points + 1)

subplot_names = [
    "Overall",
    "English",
    "Chinese",
    "Extreme",
    "Hard",
    "Moderate",
    "Easy",
]

# Per-model style: line color and marker shape
model_style = {
    'Gemini-2.5-Pro': {'color': '#E64B35', 'marker': 'o'},
    'GPT-5': {'color': '#4DBBD5', 'marker': 's'},
    'Claude-4-Sonnet': {'color': '#00A087', 'marker': '^'},
    'DeepSeek-V3.2': {'color': '#F39B7F', 'marker': 'v'},
    'Qwen3-235B-A22B-Thinking-2507': {'color': '#3C5488', 'marker': 'D'},
    'GLM-4.6': {'color': '#91D1C2', 'marker': '<'},
    'Kimi-K2-Instruct-0905': {'color': '#925E9F', 'marker': '>'},
    'MiniMax-M2': {'color': '#8491B4', 'marker': 'p'},
    'Ministral-3-14B-Instruct-2512': {'color': '#7E6148', 'marker': 'h'},
    'Llama-3.1-405B-Instruct': {'color': '#B09C85', 'marker': '*'},
}

# Subplot Data (one dict per subplot, in subplot_names order)
data_1 = {
    'Gemini-2.5-Pro': [74.02, 79.2, 81.34],
    'GPT-5': [72.43, 77.34, 79.98],
    'Claude-4-Sonnet': [70.2, 76.63, 79.08],
    'DeepSeek-V3.2': [67.16, 73.91, 76.95],
    'Qwen3-235B-A22B-Thinking-2507': [66.46, 74.79, 78.22],
    'GLM-4.6': [59.0, 66.59, 70.68],
    'Kimi-K2-Instruct-0905': [55.89, 63.15, 65.97],
    'MiniMax-M2': [53.52, 63.11, 68.38],
    'Ministral-3-14B-Instruct-2512': [45.85, 52.28, 55.42],
    'Llama-3.1-405B-Instruct': [39.9, 47.48, 51.13],
}

data_2 = {
    'Gemini-2.5-Pro': [73.22, 78.89, 80.64],
    'GPT-5': [72.6, 78.13, 81.23],
    'Claude-4-Sonnet': [70.29, 77.67, 80.24],
    'DeepSeek-V3.2': [67.37, 74.01, 76.9],
    'Qwen3-235B-A22B-Thinking-2507': [66.47, 74.78, 78.22],
    'GLM-4.6': [57.25, 64.37, 69.12],
    'Kimi-K2-Instruct-0905': [58.34, 64.55, 67.29],
    'MiniMax-M2': [53.4, 63.78, 69.49],
    'Ministral-3-14B-Instruct-2512': [48.43, 53.86, 57.18],
    'Llama-3.1-405B-Instruct': [43.34, 51.2, 54.54],
}

data_3 = {
    'Gemini-2.5-Pro': [74.83, 79.52, 82.03],
    'GPT-5': [72.26, 76.54, 78.72],
    'Claude-4-Sonnet': [70.11, 75.59, 77.91],
    'DeepSeek-V3.2': [66.96, 73.8, 76.99],
    'Qwen3-235B-A22B-Thinking-2507': [66.44, 74.79, 78.22],
    'GLM-4.6': [60.76, 68.81, 72.25],
    'Kimi-K2-Instruct-0905': [53.44, 61.76, 64.64],
    'MiniMax-M2': [53.63, 62.44, 67.27],
    'Ministral-3-14B-Instruct-2512': [43.27, 50.69, 53.67],
    'Llama-3.1-405B-Instruct': [36.46, 43.77, 47.72],
}

data_4 = {
    'Gemini-2.5-Pro': [50.85, 55.54, 57.98],
    'GPT-5': [48.55, 52.06, 54.09],
    'Claude-4-Sonnet': [47.12, 51.88, 53.73],
    'DeepSeek-V3.2': [43.84, 48.13, 50.59],
    'Qwen3-235B-A22B-Thinking-2507': [44.17, 48.88, 51.0],
    'GLM-4.6': [38.72, 44.14, 47.65],
    'Kimi-K2-Instruct-0905': [38.06, 43.54, 46.66],
    'MiniMax-M2': [34.68, 42.03, 46.41],
    'Ministral-3-14B-Instruct-2512': [31.25, 35.72, 38.6],
    'Llama-3.1-405B-Instruct': [30.03, 34.87, 37.03],
}

data_5 = {
    'Gemini-2.5-Pro': [79.87, 86.78, 90.14],
    'GPT-5': [80.18, 86.04, 88.67],
    'Claude-4-Sonnet': [74.23, 83.01, 86.56],
    'DeepSeek-V3.2': [65.77, 75.91, 80.14],
    'Qwen3-235B-A22B-Thinking-2507': [64.49, 76.57, 80.85],
    'GLM-4.6': [48.91, 56.32, 60.27],
    'Kimi-K2-Instruct-0905': [43.82, 51.11, 54.81],
    'MiniMax-M2': [41.65, 50.38, 55.71],
    'Ministral-3-14B-Instruct-2512': [37.58, 43.73, 46.91],
    'Llama-3.1-405B-Instruct': [34.18, 39.52, 43.5],
}

data_6 = {
    'Gemini-2.5-Pro': [85.2, 89.77, 91.27],
    'GPT-5': [81.41, 86.95, 91.17],
    'Claude-4-Sonnet': [75.38, 84.9, 87.4],
    'DeepSeek-V3.2': [73.91, 82.89, 86.86],
    'Qwen3-235B-A22B-Thinking-2507': [75.92, 84.02, 89.61],
    'GLM-4.6': [61.84, 73.27, 79.19],
    'Kimi-K2-Instruct-0905': [57.69, 67.85, 72.24],
    'MiniMax-M2': [60.77, 72.6, 79.59],
    'Ministral-3-14B-Instruct-2512': [39.58, 46.38, 50.12],
    'Llama-3.1-405B-Instruct': [26.87, 33.21, 37.55],
}

data_7 = {
    'Gemini-2.5-Pro': [84.97, 89.92, 91.43],
    'GPT-5': [84.19, 89.43, 91.68],
    'Claude-4-Sonnet': [85.75, 90.44, 92.74],
    'DeepSeek-V3.2': [85.25, 90.85, 93.16],
    'Qwen3-235B-A22B-Thinking-2507': [82.31, 91.83, 94.66],
    'GLM-4.6': [81.88, 89.23, 92.86],
    'Kimi-K2-Instruct-0905': [78.34, 85.47, 86.52],
    'MiniMax-M2': [73.48, 84.29, 89.33],
    'Ministral-3-14B-Instruct-2512': [67.89, 76.04, 79.06],
    'Llama-3.1-405B-Instruct': [60.16, 72.33, 76.72],
}

data_list = [data_1, data_2, data_3, data_4, data_5, data_6, data_7]

# Layout: 3 plots on top + 4 on the bottom.
# Concretely a 2x4 grid: row 1 holds 3 plots plus the legend in slot 4;
# row 2 holds 4 plots. figsize chosen so each cell is near-square (4:2 = 2:1).
fig = plt.figure(figsize=(16, 8))
gs = fig.add_gridspec(2, 4, width_ratios=[1, 1, 1, 1], height_ratios=[1, 1])  # 2x4 grid

axes = []

# Top row: 3 subplots
for i in range(3):
    ax = fig.add_subplot(gs[0, i])
    ax.set_box_aspect(1)  # square plot area
    axes.append(ax)

# Bottom row: 4 subplots
for i in range(4):
    ax = fig.add_subplot(gs[1, i])
    ax.set_box_aspect(1)  # square plot area
    axes.append(ax)

# Compute a shared y-axis range for a group of subplots.
def get_group_ylim(indices):
    group_data = [data_list[idx] for idx in indices]
    min_vals = []
    max_vals = []
    for d in group_data:
        min_vals.append(min(v[0] for v in d.values()))
        max_vals.append(max(v[2] for v in d.values()))

    g_min = min(min_vals)
    g_max = max(max_vals)
    y_range = g_max - g_min
    padding = y_range * 0.15 if y_range > 0 else 1.0
    return g_min - padding, g_max + padding

group1_ylim = get_group_ylim([0, 1, 2])  # Overall, English, Chinese
group2_ylim = get_group_ylim([4, 5])     # Hard, Moderate

# Subplot-title font (loop-invariant; built once, used in every subplot)
font_prop = FontProperties(family='Arial', style='italic', size=14, weight='bold')

# Draw the line charts
for i, ax in enumerate(axes):
    current_data = data_list[i]

    # Per-subplot y range with head/foot padding:
    # upper bound from the N=3 values, lower bound from the N=1 values.
    min_val = min(v[0] for v in current_data.values())
    max_val = max(v[2] for v in current_data.values())
    y_range = max_val - min_val
    padding = y_range * 0.15 if y_range > 0 else 1.0

    ylim_to_use = (min_val - padding, max_val + padding)
    if i in [0, 1, 2]:
        ylim_to_use = group1_ylim
    elif i in [4, 5]:
        ylim_to_use = group2_ylim

    for model_name, style in model_style.items():
        if model_name not in current_data:
            continue

        vals = current_data[model_name]

        ax.plot(
            x,
            vals,
            label=model_name,
            color=style['color'],
            marker=style['marker'],
            linewidth=1.8,
            markersize=7,
            markerfacecolor=style['color'],
            markeredgecolor=style['color'],
            markeredgewidth=1.5
        )

    # Annotate the best score at N=3:
    # find the model with the highest value at index 2.
    best_model = max(current_data.keys(), key=lambda m: current_data[m][2])
    best_val = current_data[best_model][2]
    best_style = model_style[best_model]

    ax.annotate(
        f'{best_val}',
        (x[2], best_val),
        textcoords='offset points',
        xytext=(-5, 5),
        ha='right',
        va='bottom',
        fontsize=11,
        fontweight='bold',
        color=best_style['color'],
        fontfamily='Arial'
    )

    ax.set_xlabel('N (Best-of-N)', fontsize=12, labelpad=8, fontweight='bold', fontfamily='Arial')
    ax.set_ylabel('Score', fontsize=12, labelpad=8, fontweight='bold', fontfamily='Arial')

    ax.set_ylim(ylim_to_use)

    # Subplot name in the upper-left corner, grey italic
    ax.text(0.05, 0.95, subplot_names[i], transform=ax.transAxes,
            fontproperties=font_prop,
            color='gray', verticalalignment='top')

    ax.grid(True, which='both', linestyle='--', linewidth=0.6, color='#d0d6dc')
    for spine in ax.spines.values():
        spine.set_visible(True)
        spine.set_color('black')
        spine.set_linewidth(1.0)

    ax.xaxis.set_major_locator(FixedLocator(x))
    ax.set_xticklabels(x, fontsize=10)
    ax.tick_params(axis='both', which='both', labelsize=10, direction='out', length=4)

# Shared legend in the 4th slot of the top row
ax_legend = fig.add_subplot(gs[0, 3])
ax_legend.axis('off')  # hide axes
handles, labels = axes[0].get_legend_handles_labels()

# Center the legend inside that cell
legend = ax_legend.legend(
    handles,
    labels,
    loc='center',
    title="Models",
    fontsize=11,
    frameon=True,
    prop={'weight': 'bold', 'family': 'Arial', 'size': 11},
    title_fontsize=14
)
legend.get_title().set_fontweight('bold')
legend.get_frame().set_edgecolor('#b0b0b0')
legend.get_frame().set_linewidth(1.0)

# Color each legend entry like its line (fallback to black if unknown)
for text in legend.get_texts():
    model_name = text.get_text()
    text.set_color(model_style.get(model_name, {}).get('color', 'black'))

plt.tight_layout()
fig.savefig('/cpfs/user/chenziyang/LongBenchmark/draw/images/bon_line.png', dpi=300, bbox_inches='tight')
plt.close(fig)
|
draw/tools/context_requirement_bar.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Grouped bar chart: Full vs. Partial context scores per model.

Reconstructed from the diff view of draw/tools/context_requirement_bar.py.
Fixes vs. original: bare ``except:`` narrowed to ``except Exception:``;
removed the unused ``bold_font`` variable (it was never referenced, and
constructing it from a possibly-missing font path is a latent failure);
``from matplotlib.patches import Patch`` moved from mid-file into the
top import group.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import font_manager as fm
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Patch

# Use the style from strategy_time.py
plt.style.use('seaborn-v0_8-whitegrid')

# Data: per-model scores under Full vs. Partial context requirements
data = {
    "Gemini-2.5-Pro": {"Full": 70.07, "Partial": 77.69},
    "GPT-5": {"Full": 69.16, "Partial": 77.00},
    "Claude-4-Sonnet": {"Full": 66.17, "Partial": 74.59},
    "DeepSeek-V3.2": {"Full": 64.60, "Partial": 71.92},
    "Qwen3-235B-A22B-Thinking-2507": {"Full": 63.47, "Partial": 71.43},
    "GLM-4.6": {"Full": 54.70, "Partial": 62.68},
    "Kimi-K2-Instruct-0905": {"Full": 50.76, "Partial": 61.60},
    "MiniMax-M2": {"Full": 49.38, "Partial": 58.07},
    "Ministral-3-14B-Instruct-2512": {"Full": 42.49, "Partial": 50.01},
    "Llama-3.1-405B-Instruct": {"Full": 37.30, "Partial": 44.94}
}

# Per-model color, matching the palette used by the sibling plots
model_colors = {
    'Gemini-2.5-Pro': '#E64B35',
    'GPT-5': '#4DBBD5',
    'Claude-4-Sonnet': '#00A087',
    'DeepSeek-V3.2': '#F39B7F',
    'Qwen3-235B-A22B-Thinking-2507': '#3C5488',
    'GLM-4.6': '#91D1C2',
    'Kimi-K2-Instruct-0905': '#925E9F',
    'MiniMax-M2': '#8491B4',
    'Ministral-3-14B-Instruct-2512': '#7E6148',
    'Llama-3.1-405B-Instruct': '#B09C85'
}

# Style Configuration (from strategy_time.py)
font_path = '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf'
font_bold_path = '/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'
try:
    fm.fontManager.addfont(font_path)
    fm.fontManager.addfont(font_bold_path)
    plt.rcParams['font.family'] = 'Arial'
except Exception:
    print("Warning: Arial font not found, using default.")

plt.rcParams['font.weight'] = 'bold'
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titleweight'] = 'bold'

# Prepare Data
models = list(data.keys())
full_scores = [data[m]["Full"] for m in models]
partial_scores = [data[m]["Partial"] for m in models]

x = np.arange(len(models))  # the label locations
width = 0.35  # the width of the bars

# Adjust figure size (strategy_time.py uses 8x6 but 10 models need more width)
fig, ax = plt.subplots(figsize=(12, 6))

# Plotting
# Scientific colors (Tableau-like, retained from the earlier style request)
color_full = '#495481'     # Tableau Blue
color_partial = '#e3882f'  # Tableau Orange

# Full Context Bars (Solid)
rects1 = ax.bar(x - width/2 - 0.02, full_scores, width, label='Full',
                color=color_full,
                edgecolor='white', linewidth=0.8, alpha=0.9, zorder=3)

# Partial Context Bars
rects2 = ax.bar(x + width/2 + 0.02, partial_scores, width, label='Partial',
                color=color_partial,
                edgecolor='white', linewidth=0.8, alpha=0.9, zorder=3)

# Add values on top of bars
def autolabel(rects, color):
    """Label each bar with its height, slightly above the bar top."""
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2., height + 1,
                f'{height:.1f}',
                ha='center', va='bottom', fontsize=10, fontweight='bold', color=color)

autolabel(rects1, color_full)
autolabel(rects2, color_partial)

# Customize Axis: long model names get a manual two-line break
ax.set_xticks(x)
display_models = [m.replace('Qwen3-235B-A22B-Thinking-2507', 'Qwen3-235B-A22B\n-Thinking-2507')
                   .replace('Kimi-K2-Instruct-0905', 'Kimi-K2\n-Instruct-0905')
                   .replace('Ministral-3-14B-Instruct-2512', 'Ministral-3-14B\n-Instruct-2512')
                   .replace('Llama-3.1-405B-Instruct', 'Llama-3.1-405B\n-Instruct')
                  for m in models]
ax.set_xticklabels(display_models, rotation=20, ha='center', fontsize=11, fontweight='bold')

# Color the x-axis labels according to model color
for tick_label, model in zip(ax.get_xticklabels(), models):
    tick_label.set_color(model_colors.get(model, 'black'))

ax.set_ylim(0, 100)
ax.set_ylabel("Score", fontsize=12, labelpad=12, fontweight='bold', fontfamily='Arial')

# Customize Spines (from strategy_time.py)
for spine in ax.spines.values():
    spine.set_visible(True)
    spine.set_color('black')
    spine.set_linewidth(1.0)

# Grid (from strategy_time.py)
ax.grid(True, axis='y', linestyle='--', linewidth=0.6, color='#d0d6dc', zorder=0)
ax.set_axisbelow(True)

# Tick Params (from strategy_time.py)
ax.tick_params(axis='both', which='both', labelsize=10, direction='out', length=4)

# Custom Legend (matching strategy_time.py style: frameon=True, edgecolor, etc)
legend_elements = [
    Patch(facecolor=color_full, edgecolor='white', label='Full Context'),
    Patch(facecolor=color_partial, edgecolor='white', label='Partial Context')
]
legend = ax.legend(handles=legend_elements, loc='upper right', fontsize=10, frameon=True, prop={'weight': 'bold', 'family': 'Arial'})
legend.get_frame().set_edgecolor('#b0b0b0')
legend.get_frame().set_linewidth(1.0)

plt.tight_layout()

# Save
save_path = '/cpfs/user/chenziyang/LongBenchmark/draw/images/context_requirement_bar.png'
plt.savefig(save_path, dpi=300, bbox_inches='tight')
plt.close(fig)
print(f"Chart saved to {save_path}")
|
draw/tools/data_source_distribution.py
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import matplotlib.pyplot as plt
|
| 4 |
+
import json
|
| 5 |
+
from matplotlib import font_manager as fm
|
| 6 |
+
from matplotlib.font_manager import FontProperties
|
| 7 |
+
|
| 8 |
+
font_path = "/usr/share/fonts/truetype/msttcorefonts/Arial.ttf"
|
| 9 |
+
font_bold_path = "/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf"
|
| 10 |
+
fm.fontManager.addfont(font_path)
|
| 11 |
+
fm.fontManager.addfont(font_bold_path)
|
| 12 |
+
plt.rcParams["font.family"] = "Arial"
|
| 13 |
+
plt.rcParams["font.weight"] = "bold"
|
| 14 |
+
plt.rcParams["axes.labelweight"] = "bold"
|
| 15 |
+
plt.rcParams["axes.titleweight"] = "bold"
|
| 16 |
+
plt.rcParams["font.size"] = 12
|
| 17 |
+
plt.rcParams["xtick.labelsize"] = 12
|
| 18 |
+
plt.rcParams["ytick.labelsize"] = 12
|
| 19 |
+
bold_font = FontProperties(fname=font_bold_path, weight="bold")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
'''
|
| 23 |
+
数据获取和统计
|
| 24 |
+
'''
|
| 25 |
+
# with open("/cpfs/user/chenziyang/LongBenchmark/eval/dataset/longbench_pro_final.json", "r") as f:
|
| 26 |
+
# data = json.load(f)
|
| 27 |
+
# print(len(data))
|
| 28 |
+
# en, zh = 0, 0
|
| 29 |
+
# full, partial = 0, 0
|
| 30 |
+
# k8, k16, k32, k64, k128, k256 = 0, 0, 0, 0, 0, 0
|
| 31 |
+
# extreme, hard, moderate, easy = 0, 0, 0, 0
|
| 32 |
+
# for d in data:
|
| 33 |
+
# if d["language"] == "English":
|
| 34 |
+
# en += 1
|
| 35 |
+
# elif d["language"] == "Chinese":
|
| 36 |
+
# zh += 1
|
| 37 |
+
# if d["contextual_requirement"] == "Full":
|
| 38 |
+
# full += 1
|
| 39 |
+
# elif d["contextual_requirement"] == "Partial":
|
| 40 |
+
# partial += 1
|
| 41 |
+
# if d["token_length"] == "8k":
|
| 42 |
+
# k8 += 1
|
| 43 |
+
# elif d["token_length"] == "16k":
|
| 44 |
+
# k16 += 1
|
| 45 |
+
# elif d["token_length"] == "32k":
|
| 46 |
+
# k32 += 1
|
| 47 |
+
# elif d["token_length"] == "64k":
|
| 48 |
+
# k64 += 1
|
| 49 |
+
# elif d["token_length"] == "128k":
|
| 50 |
+
# k128 += 1
|
| 51 |
+
# elif d["token_length"] == "256k":
|
| 52 |
+
# k256 += 1
|
| 53 |
+
# if d["difficulty"] == "Extreme":
|
| 54 |
+
# extreme += 1
|
| 55 |
+
# elif d["difficulty"] == "Hard":
|
| 56 |
+
# hard += 1
|
| 57 |
+
# elif d["difficulty"] == "Moderate":
|
| 58 |
+
# moderate += 1
|
| 59 |
+
# elif d["difficulty"] == "Easy":
|
| 60 |
+
# easy += 1
|
| 61 |
+
# print(f"English: {en}, Chinese: {zh}")
|
| 62 |
+
# print(f"Full: {full}, Partial: {partial}")
|
| 63 |
+
# print(f"k8: {k8}, k16: {k16}, k32: {k32}, k64: {k64}, k128: {k128}, k256: {k256}")
|
| 64 |
+
# print(f"extreme: {extreme}, hard: {hard}, moderate: {moderate}, easy: {easy}")
|
| 65 |
+
|
| 66 |
+
# from collections import Counter
|
| 67 |
+
# with open("/cpfs/user/chenziyang/LongBenchmark/eval/dataset/doc_type/doc_type_stat_final_patch.json", "r") as f:
|
| 68 |
+
# data = json.load(f)
|
| 69 |
+
# print(len(data))
|
| 70 |
+
# text_type_conter = Counter()
|
| 71 |
+
# for item in data:
|
| 72 |
+
# text_type_conter[item["doc_type"]] += 1
|
| 73 |
+
# print(text_type_conter)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
# ---------------------------
|
| 77 |
+
# Example data (replace with your real values)
|
| 78 |
+
# ---------------------------
|
| 79 |
+
data = {
|
| 80 |
+
"Difficulty": {"Extreme": 440, "Hard": 289, "Moderate": 289, "Easy": 482},
|
| 81 |
+
"Length": {"8k": 250, "16k": 250, "32k": 250, "64k": 250, "128k": 250, "256k": 250},
|
| 82 |
+
"Context\nRequirement": {"Full": 840, "Partial": 660},
|
| 83 |
+
"Language": {"English": 750, "Chinese": 750},
|
| 84 |
+
"Text Type": {
|
| 85 |
+
"Literature (Novel)": 320,
|
| 86 |
+
"Literature (Other)": 179,
|
| 87 |
+
"Law": 176,
|
| 88 |
+
"Social": 121,
|
| 89 |
+
"Finance": 115,
|
| 90 |
+
"Technology": 113,
|
| 91 |
+
"Science": 107,
|
| 92 |
+
"History": 103,
|
| 93 |
+
"News": 69,
|
| 94 |
+
"Policy": 55,
|
| 95 |
+
"Education": 51,
|
| 96 |
+
"Structured": 41,
|
| 97 |
+
"Medicine": 27,
|
| 98 |
+
"Other": 23,
|
| 99 |
+
},
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
# ---------------------------
|
| 103 |
+
# Convert to dataframe and compute percentages
|
| 104 |
+
# ---------------------------
|
| 105 |
+
all_categories = []
|
| 106 |
+
for dimension_data in data.values():
|
| 107 |
+
for category in dimension_data.keys():
|
| 108 |
+
if category not in all_categories:
|
| 109 |
+
all_categories.append(category)
|
| 110 |
+
|
| 111 |
+
df = pd.DataFrame([{c: d.get(c, 0) for c in all_categories} for d in data.values()],
|
| 112 |
+
index=list(data.keys()), columns=all_categories)
|
| 113 |
+
difficulty_categories = set(data["Difficulty"].keys())
|
| 114 |
+
|
| 115 |
+
row_sums = df.sum(axis=1)
|
| 116 |
+
|
| 117 |
+
# ---------------------------
|
| 118 |
+
# Plot
|
| 119 |
+
# ---------------------------
|
| 120 |
+
fig, ax = plt.subplots(figsize=(16, 4)) # compact wide layout
|
| 121 |
+
|
| 122 |
+
y_spacing = 0.8 # reduce gap between bars
|
| 123 |
+
y_pos = np.arange(len(df)) * y_spacing
|
| 124 |
+
left = np.zeros(len(df))
|
| 125 |
+
|
| 126 |
+
# Use a discrete colormap
|
| 127 |
+
category_colors = {
|
| 128 |
+
# Difficulty
|
| 129 |
+
"Extreme": "#DA7B89",
|
| 130 |
+
"Hard": "#E6C875",
|
| 131 |
+
"Moderate": "#C5DB80",
|
| 132 |
+
"Easy": "#92C56A",
|
| 133 |
+
# Length
|
| 134 |
+
"8k": "#d2e3f3",
|
| 135 |
+
"16k": "#aacee4",
|
| 136 |
+
"32k": "#68abd5",
|
| 137 |
+
"64k": "#3788bf",
|
| 138 |
+
"128k": "#0f5ba6",
|
| 139 |
+
"256k": "#09336f",
|
| 140 |
+
# Context Requirement
|
| 141 |
+
"Full": "#495481",
|
| 142 |
+
"Partial": "#e3882f",
|
| 143 |
+
# Language
|
| 144 |
+
"English": "#80a492",
|
| 145 |
+
"Chinese": "#d23918",
|
| 146 |
+
# Text Type
|
| 147 |
+
"Literature (Novel)": "#393B79",
|
| 148 |
+
"Law": "#5254A3",
|
| 149 |
+
"Literature (Other)": "#6B6ECF",
|
| 150 |
+
"Social": "#9C9EDE",
|
| 151 |
+
"Technology": "#637939",
|
| 152 |
+
"Finance": "#8CA252",
|
| 153 |
+
"Science": "#B5CF6B",
|
| 154 |
+
"History": "#CEDB9C",
|
| 155 |
+
"News": "#8C6D31",
|
| 156 |
+
"Policy": "#BD9E39",
|
| 157 |
+
"Education": "#E7BA52",
|
| 158 |
+
"Structured": "#E7CB94",
|
| 159 |
+
"Medicine": "#843C39",
|
| 160 |
+
"Other": "#AD494A",
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
for i, cat in enumerate(all_categories):
|
| 164 |
+
values = df[cat].values
|
| 165 |
+
bar_color = category_colors[cat]
|
| 166 |
+
ax.barh(y_pos, values, left=left, color=bar_color, edgecolor="white", height=0.45)
|
| 167 |
+
|
| 168 |
+
# put category name INSIDE bar if segment > threshold
|
| 169 |
+
for j, val in enumerate(values):
|
| 170 |
+
if val >= 52: # threshold to avoid clutter
|
| 171 |
+
percentage = (val / row_sums[j]) * 100 if row_sums[j] > 0 else 0
|
| 172 |
+
text_x = left[j] + val / 2
|
| 173 |
+
text_y = y_pos[j]
|
| 174 |
+
text_color = "white"
|
| 175 |
+
ax.text(text_x, text_y + 0.08, cat,
|
| 176 |
+
va="center", ha="center", fontsize=10, color=text_color,
|
| 177 |
+
fontproperties=bold_font)
|
| 178 |
+
ax.text(text_x, text_y - 0.08, f"{percentage:.1f}%",
|
| 179 |
+
va="center", ha="center", fontsize=8, color=text_color,
|
| 180 |
+
fontproperties=bold_font)
|
| 181 |
+
|
| 182 |
+
left += values
|
| 183 |
+
|
| 184 |
+
# ---------------------------
|
| 185 |
+
# Formatting
|
| 186 |
+
# ---------------------------
|
| 187 |
+
ax.set_yticks(y_pos)
|
| 188 |
+
ax.set_yticklabels(df.index, fontsize=12, fontweight="bold")
|
| 189 |
+
for label in ax.get_yticklabels():
|
| 190 |
+
label.set_horizontalalignment("center")
|
| 191 |
+
label.set_fontproperties(bold_font)
|
| 192 |
+
ax.tick_params(axis='y', pad=40, labelsize=12)
|
| 193 |
+
ax.set_xlim(0, max(row_sums))
|
| 194 |
+
xlabel = ax.set_xlabel("Percentage of Samples (Total Samples = 1,500)", fontsize=12, fontweight="bold")
|
| 195 |
+
xlabel.set_fontproperties(bold_font)
|
| 196 |
+
ax.set_xticks([])
|
| 197 |
+
ax.tick_params(axis='x', bottom=False, labelbottom=False, labelsize=12)
|
| 198 |
+
|
| 199 |
+
# No legend
|
| 200 |
+
ax.legend().remove()
|
| 201 |
+
|
| 202 |
+
plt.tight_layout()
|
| 203 |
+
plt.show()
|
| 204 |
+
|
| 205 |
+
save_path = "/cpfs/user/chenziyang/LongBenchmark/draw/images/data_source_distribution.png"
|
| 206 |
+
plt.savefig(save_path, dpi=600, bbox_inches='tight', facecolor='white', edgecolor='none')
|
| 207 |
+
print(f"Figure saved to: {save_path}")
|
draw/tools/fitst_image.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import matplotlib.pyplot as plt
|
| 2 |
+
import numpy as np
|
| 3 |
+
from matplotlib import font_manager as fm
|
| 4 |
+
from matplotlib.font_manager import FontProperties
|
| 5 |
+
from matplotlib.patches import Patch
|
| 6 |
+
|
| 7 |
+
# Use the style from strategy_time.py
|
| 8 |
+
plt.style.use('seaborn-v0_8-whitegrid')
|
| 9 |
+
|
| 10 |
+
# Data
|
| 11 |
+
data = {
|
| 12 |
+
"Gemini-2.5-Pro": {"Overall": 73.42, "Extreme": 50.77},
|
| 13 |
+
"GPT-5": {"Overall": 72.61, "Extreme": 48.37},
|
| 14 |
+
"Claude-4-Sonnet": {"Overall": 69.87, "Extreme": 47.05},
|
| 15 |
+
"DeepSeek-V3.2": {"Overall": 67.82, "Extreme": 44.27},
|
| 16 |
+
"Qwen3-235B-A22B-Thinking-2507": {"Overall": 66.97, "Extreme": 43.39},
|
| 17 |
+
"GLM-4.6": {"Overall": 58.21, "Extreme": 38.88},
|
| 18 |
+
"Kimi-K2-Instruct-0905": {"Overall": 55.53, "Extreme": 38.25},
|
| 19 |
+
"MiniMax-M2": {"Overall": 53.21, "Extreme": 34.98},
|
| 20 |
+
"Ministral-3-14B-Instruct-2512": {"Overall": 45.80, "Extreme": 31.66},
|
| 21 |
+
"Llama-3.1-405B-Instruct": {"Overall": 40.66, "Extreme": 29.81}
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
model_colors = {
|
| 25 |
+
"Gemini-2.5-Pro": '#E64B35',
|
| 26 |
+
"GPT-5": '#4DBBD5',
|
| 27 |
+
"Claude-4-Sonnet": '#00A087',
|
| 28 |
+
"DeepSeek-V3.2": '#F39B7F',
|
| 29 |
+
"Qwen3-235B-A22B-Thinking-2507": '#3C5488',
|
| 30 |
+
"GLM-4.6": '#91D1C2',
|
| 31 |
+
"Kimi-K2-Instruct-0905": '#925E9F',
|
| 32 |
+
"MiniMax-M2": '#8491B4',
|
| 33 |
+
"Ministral-3-14B-Instruct-2512": '#7E6148',
|
| 34 |
+
"Llama-3.1-405B-Instruct": '#B09C85'
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
# Style Configuration
|
| 38 |
+
font_path = '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf'
|
| 39 |
+
font_bold_path = '/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'
|
| 40 |
+
try:
|
| 41 |
+
fm.fontManager.addfont(font_path)
|
| 42 |
+
fm.fontManager.addfont(font_bold_path)
|
| 43 |
+
plt.rcParams['font.family'] = 'Arial'
|
| 44 |
+
except:
|
| 45 |
+
print("Warning: Arial font not found, using default.")
|
| 46 |
+
|
| 47 |
+
plt.rcParams['font.weight'] = 'bold'
|
| 48 |
+
plt.rcParams['axes.labelweight'] = 'bold'
|
| 49 |
+
plt.rcParams['axes.titleweight'] = 'bold'
|
| 50 |
+
|
| 51 |
+
# Prepare Data
|
| 52 |
+
models = list(data.keys())
|
| 53 |
+
overall_scores = [data[m]["Overall"] for m in models]
|
| 54 |
+
extreme_scores = [data[m]["Extreme"] for m in models]
|
| 55 |
+
|
| 56 |
+
x = np.arange(len(models))
|
| 57 |
+
width_overall = 0.6
|
| 58 |
+
width_extreme = 0.3
|
| 59 |
+
|
| 60 |
+
# Plotting
|
| 61 |
+
fig, ax = plt.subplots(figsize=(14, 6))
|
| 62 |
+
|
| 63 |
+
# Colors
|
| 64 |
+
bar_colors = [model_colors[m] for m in models]
|
| 65 |
+
|
| 66 |
+
# Overall Bars (Wide)
|
| 67 |
+
rects1 = ax.bar(x, overall_scores, width_overall, label='Overall',
|
| 68 |
+
color=bar_colors,
|
| 69 |
+
edgecolor='white', linewidth=0.8, alpha=0.4, zorder=2)
|
| 70 |
+
|
| 71 |
+
# Extreme Bars (Narrow, nested)
|
| 72 |
+
rects2 = ax.bar(x, extreme_scores, width_extreme, label='Extreme',
|
| 73 |
+
color=bar_colors,
|
| 74 |
+
edgecolor='white', linewidth=0.8, alpha=1.0, zorder=3)
|
| 75 |
+
|
| 76 |
+
# Add values on top of bars
|
| 77 |
+
def autolabel(rects, colors, y_offset=0):
|
| 78 |
+
for rect, color in zip(rects, colors):
|
| 79 |
+
height = rect.get_height()
|
| 80 |
+
ax.text(rect.get_x() + rect.get_width() / 2., height + y_offset + 1,
|
| 81 |
+
f'{height:.1f}',
|
| 82 |
+
ha='center', va='bottom', fontsize=10, fontweight='bold', color=color)
|
| 83 |
+
|
| 84 |
+
autolabel(rects1, bar_colors)
|
| 85 |
+
autolabel(rects2, bar_colors)
|
| 86 |
+
|
| 87 |
+
# Customize Axis
|
| 88 |
+
ax.set_xticks(x)
|
| 89 |
+
display_models = [m.replace('Qwen3-235B-A22B-Thinking-2507', 'Qwen3-235B-A22B\n-Thinking-2507')
|
| 90 |
+
.replace('Kimi-K2-Instruct-0905', 'Kimi-K2\n-Instruct-0905')
|
| 91 |
+
.replace('Ministral-3-14B-Instruct-2512', 'Ministral-3-14B\n-Instruct-2512')
|
| 92 |
+
.replace('Llama-3.1-405B-Instruct', 'Llama-3.1-405B\n-Instruct')
|
| 93 |
+
for m in models]
|
| 94 |
+
ax.set_xticklabels(display_models, rotation=0, ha='center', va='center', fontsize=11, fontweight='bold')
|
| 95 |
+
ax.tick_params(axis='x', pad=18)
|
| 96 |
+
|
| 97 |
+
# Color the x-axis labels according to model color
|
| 98 |
+
for tick_label, model in zip(ax.get_xticklabels(), models):
|
| 99 |
+
tick_label.set_color(model_colors.get(model, 'black'))
|
| 100 |
+
|
| 101 |
+
ax.set_ylim(0, 100)
|
| 102 |
+
ax.set_ylabel("Score", fontsize=12, labelpad=12, fontweight='bold', fontfamily='Arial')
|
| 103 |
+
|
| 104 |
+
# Customize Spines
|
| 105 |
+
for spine in ['top', 'right']:
|
| 106 |
+
ax.spines[spine].set_visible(False)
|
| 107 |
+
for spine in ['left', 'bottom']:
|
| 108 |
+
ax.spines[spine].set_visible(True)
|
| 109 |
+
ax.spines[spine].set_color('#808080')
|
| 110 |
+
ax.spines[spine].set_linewidth(1.0)
|
| 111 |
+
|
| 112 |
+
# Grid
|
| 113 |
+
ax.grid(True, axis='y', linestyle='--', linewidth=0.6, color='#d0d6dc', zorder=0)
|
| 114 |
+
ax.set_axisbelow(True)
|
| 115 |
+
|
| 116 |
+
# Tick Params
|
| 117 |
+
ax.tick_params(axis='both', which='both', labelsize=10, direction='out', length=4)
|
| 118 |
+
|
| 119 |
+
# Custom Legend
|
| 120 |
+
legend_elements = [
|
| 121 |
+
Patch(facecolor='gray', alpha=0.4, edgecolor='white', label='Complete Benchmark'),
|
| 122 |
+
Patch(facecolor='gray', alpha=1.0, edgecolor='white', label='Extreme-Difficulty Subset')
|
| 123 |
+
]
|
| 124 |
+
legend = ax.legend(handles=legend_elements, loc='upper right', fontsize=10, frameon=True, prop={'weight': 'bold', 'family': 'Arial'})
|
| 125 |
+
legend.get_frame().set_edgecolor('#b0b0b0')
|
| 126 |
+
legend.get_frame().set_linewidth(1.0)
|
| 127 |
+
|
| 128 |
+
plt.tight_layout()
|
| 129 |
+
|
| 130 |
+
# Save
|
| 131 |
+
save_path = '/cpfs/user/chenziyang/LongBenchmark/draw/images/fitst_image.png'
|
| 132 |
+
plt.savefig(save_path, dpi=600, bbox_inches='tight')
|
| 133 |
+
print(f"Chart saved to {save_path}")
|
draw/tools/length_heatmap.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import matplotlib.pyplot as plt
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import numpy as np
|
| 4 |
+
from matplotlib import font_manager as fm
|
| 5 |
+
from matplotlib.colors import LinearSegmentedColormap
|
| 6 |
+
|
| 7 |
+
# Data
|
| 8 |
+
data = {
|
| 9 |
+
"Gemini-2.5-Pro": {"8k": 74.5, "16k": 74.79, "32k": 75.31, "64k": 74.18, "128k": 70, "256k": 71.77},
|
| 10 |
+
"GPT-5": {"8k": 75.37, "16k": 76.27, "32k": 74.34, "64k": 76.46, "128k": 69.36, "256k": 63.82},
|
| 11 |
+
"Claude-4-Sonnet": {"8k": 72.73, "16k": 71.48, "32k": 72.82, "64k": 70.52, "128k": 66.43, "256k": 65.26},
|
| 12 |
+
"DeepSeek-V3.2": {"8k": 75.54, "16k": 74.49, "32k": 69.53, "64k": 69.47, "128k": 64.77, "256k": 53.12},
|
| 13 |
+
"Qwen3-235B-A22B\n-Thinking-2507": {"8k": 72.06, "16k": 70.43, "32k": 69.69, "64k": 66.85, "128k": 64.05, "256k": 58.77},
|
| 14 |
+
"GLM-4.6": {"8k": 71.23, "16k": 66.04, "32k": 63.53, "64k": 58.97, "128k": 47.55, "256k": 41.95},
|
| 15 |
+
"Kimi-K2\n-Instruct-0905": {"8k": 59.79, "16k": 58.17, "32k": 58.73, "64k": 53.61, "128k": 52.29, "256k": 50.61},
|
| 16 |
+
"MiniMax-M2": {"8k": 65.48, "16k": 58.32, "32k": 58.31, "64k": 52.02, "128k": 50.61, "256k": 34.51},
|
| 17 |
+
"Ministral-3-14B\n-Instruct-2512": {"8k": 51.88, "16k": 48.52, "32k": 48.75, "64k": 45.7, "128k": 42.36, "256k": 37.59},
|
| 18 |
+
"Llama-3.1-405B\n-Instruct": {"8k": 52.38, "16k": 51.8, "32k": 46.41, "64k": 41.82, "128k": 26.01, "256k": 25.54}
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
model_colors = {
|
| 22 |
+
'Gemini-2.5-Pro': '#E64B35',
|
| 23 |
+
'GPT-5': '#4DBBD5',
|
| 24 |
+
'Claude-4-Sonnet': '#00A087',
|
| 25 |
+
'DeepSeek-V3.2': '#F39B7F',
|
| 26 |
+
'Qwen3-235B-A22B\n-Thinking-2507': '#3C5488',
|
| 27 |
+
'GLM-4.6': '#91D1C2',
|
| 28 |
+
'Kimi-K2\n-Instruct-0905': '#925E9F',
|
| 29 |
+
'MiniMax-M2': '#8491B4',
|
| 30 |
+
'Ministral-3-14B\n-Instruct-2512': '#7E6148',
|
| 31 |
+
'Llama-3.1-405B\n-Instruct': '#B09C85'
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
# Style Configuration
|
| 35 |
+
font_path = '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf'
|
| 36 |
+
font_bold_path = '/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'
|
| 37 |
+
try:
|
| 38 |
+
fm.fontManager.addfont(font_path)
|
| 39 |
+
fm.fontManager.addfont(font_bold_path)
|
| 40 |
+
plt.rcParams['font.family'] = 'Arial'
|
| 41 |
+
except:
|
| 42 |
+
print("Warning: Arial font not found, using default.")
|
| 43 |
+
|
| 44 |
+
plt.rcParams['font.weight'] = 'bold'
|
| 45 |
+
plt.rcParams['axes.labelweight'] = 'bold'
|
| 46 |
+
plt.rcParams['axes.titleweight'] = 'bold'
|
| 47 |
+
|
| 48 |
+
# Prepare Data
|
| 49 |
+
df = pd.DataFrame(data).T
|
| 50 |
+
cols = ["8k", "16k", "32k", "64k", "128k", "256k"]
|
| 51 |
+
df = df[cols]
|
| 52 |
+
|
| 53 |
+
data_values = df.values
|
| 54 |
+
models = df.index.tolist()
|
| 55 |
+
lengths = df.columns.tolist()
|
| 56 |
+
|
| 57 |
+
# Define Custom "Premium" Colormap
|
| 58 |
+
# Transition: Soft Muted Red -> Pale Cream/Yellow -> Soft Muted Green
|
| 59 |
+
# Colors selected for a cleaner, more professional look
|
| 60 |
+
cmap_colors = ["#EE6666", "#FDE7A9", "#74C476"]
|
| 61 |
+
# #EE6666: Soft Red
|
| 62 |
+
# #FDE7A9: Light Warm Yellow
|
| 63 |
+
# #74C476: Soft Green
|
| 64 |
+
cmap = LinearSegmentedColormap.from_list("premium_rdylgn", cmap_colors, N=256)
|
| 65 |
+
|
| 66 |
+
# Plot
|
| 67 |
+
fig, ax = plt.subplots(figsize=(10, 8))
|
| 68 |
+
|
| 69 |
+
# Create heatmap
|
| 70 |
+
# vmin=0, vmax=100 sets the anchor points for the colors
|
| 71 |
+
im = ax.imshow(data_values, cmap=cmap, vmin=0, vmax=100, aspect='auto')
|
| 72 |
+
|
| 73 |
+
# Colorbar
|
| 74 |
+
cbar = ax.figure.colorbar(im, ax=ax, pad=0.02)
|
| 75 |
+
cbar.ax.set_ylabel("Score", rotation=-90, va="bottom", weight='bold', fontsize=10)
|
| 76 |
+
cbar.outline.set_linewidth(0) # Remove colorbar border for cleaner look
|
| 77 |
+
|
| 78 |
+
# Show all ticks and label them with the respective list entries
|
| 79 |
+
ax.set_xticks(np.arange(len(lengths)))
|
| 80 |
+
ax.set_yticks(np.arange(len(models)))
|
| 81 |
+
ax.set_xticklabels(lengths, fontsize=10, fontweight='bold')
|
| 82 |
+
ax.set_yticklabels(models, fontsize=10, fontweight='bold', va='center', ma='center', ha='center')
|
| 83 |
+
|
| 84 |
+
# Customize tick label colors
|
| 85 |
+
for label in ax.get_yticklabels():
|
| 86 |
+
model_name = label.get_text()
|
| 87 |
+
if model_name in model_colors:
|
| 88 |
+
label.set_color(model_colors[model_name])
|
| 89 |
+
|
| 90 |
+
# Rotate the tick labels and set their alignment.
|
| 91 |
+
plt.setp(ax.get_xticklabels(), rotation=0, ha="center", rotation_mode="anchor")
|
| 92 |
+
|
| 93 |
+
# Loop over data dimensions and create text annotations.
|
| 94 |
+
for i in range(len(models)):
|
| 95 |
+
for j in range(len(lengths)):
|
| 96 |
+
val = data_values[i, j]
|
| 97 |
+
# Determine text color based on value for better contrast
|
| 98 |
+
# Darker text on middle (yellow) values, lighter on dark red/green if needed
|
| 99 |
+
# But with this palette, black text works well on all
|
| 100 |
+
text_color = "black"
|
| 101 |
+
if val < 20 or val > 80:
|
| 102 |
+
# Optional: use white text for extremes if colors are very dark
|
| 103 |
+
# With current soft palette, black is likely fine.
|
| 104 |
+
# Let's stick to black for uniformity and sharpness.
|
| 105 |
+
pass
|
| 106 |
+
|
| 107 |
+
text = ax.text(j, i, f"{val:.2f}",
|
| 108 |
+
ha="center", va="center", color="#333333", # Dark gray instead of pure black
|
| 109 |
+
fontsize=10, fontweight='bold')
|
| 110 |
+
|
| 111 |
+
ax.set_xlabel("Sample Length", fontsize=12, labelpad=12, fontweight='bold')
|
| 112 |
+
ax.set_ylabel("Model", fontsize=12, labelpad=12, fontweight='bold')
|
| 113 |
+
|
| 114 |
+
# Remove spines for a cleaner look
|
| 115 |
+
for spine in ax.spines.values():
|
| 116 |
+
spine.set_visible(False)
|
| 117 |
+
|
| 118 |
+
# Grid lines - white lines to separate cells
|
| 119 |
+
ax.set_xticks(np.arange(data_values.shape[1]+1)-.5, minor=True)
|
| 120 |
+
ax.set_yticks(np.arange(data_values.shape[0]+1)-.5, minor=True)
|
| 121 |
+
ax.grid(which="minor", color="white", linestyle='-', linewidth=2) # Thicker white grid
|
| 122 |
+
ax.tick_params(which="minor", bottom=False, left=False)
|
| 123 |
+
ax.tick_params(which="major", bottom=False, left=False) # Hide tick marks
|
| 124 |
+
ax.tick_params(axis='y', pad=55) # Adjust padding for centered labels
|
| 125 |
+
|
| 126 |
+
plt.tight_layout()
|
| 127 |
+
|
| 128 |
+
# Save
|
| 129 |
+
save_path = '/cpfs/user/chenziyang/LongBenchmark/draw/images/length_heatmap.png'
|
| 130 |
+
plt.savefig(save_path, dpi=300, bbox_inches='tight')
|
| 131 |
+
print(f"Heatmap saved to {save_path}")
|
draw/tools/passn_line.py
ADDED
|
@@ -0,0 +1,380 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# import os
|
| 2 |
+
# import json
|
| 3 |
+
|
| 4 |
+
# def find_file(model_name):
|
| 5 |
+
# file_root = os.path.join(result_root, model_name)
|
| 6 |
+
# for file in os.listdir(file_root):
|
| 7 |
+
# if file.startswith("thinking") and file.endswith("_evaluation.jsonl"):
|
| 8 |
+
# return os.path.join(file_root, file)
|
| 9 |
+
# raise ValueError(f"No file found for model {model_name}")
|
| 10 |
+
|
| 11 |
+
# def load_jsonl(file_path):
|
| 12 |
+
# with open(file_path, 'r', encoding='utf-8') as f:
|
| 13 |
+
# return [json.loads(line, strict=False) for line in f]
|
| 14 |
+
|
| 15 |
+
# def calculate_pass_n(data, n, difficulty_filter=None, language_filter=None):
|
| 16 |
+
# id2maxitem = {}
|
| 17 |
+
# for item in data:
|
| 18 |
+
# if difficulty_filter is not None and item['difficulty'] != difficulty_filter:
|
| 19 |
+
# continue
|
| 20 |
+
# if language_filter is not None and item['language'] != language_filter:
|
| 21 |
+
# continue
|
| 22 |
+
# if item['bon_idx'] > n:
|
| 23 |
+
# continue
|
| 24 |
+
# _id = item['id']
|
| 25 |
+
# metric = item['metric']
|
| 26 |
+
# if _id not in id2maxitem or metric > id2maxitem[_id]['metric']:
|
| 27 |
+
# id2maxitem[_id] = item
|
| 28 |
+
# if len(id2maxitem) == 0:
|
| 29 |
+
# return 0.0 # 如果没有数据,返回0
|
| 30 |
+
# pass_num = 0
|
| 31 |
+
# for _id, item in id2maxitem.items():
|
| 32 |
+
# if "T4" in item['primary_task']:
|
| 33 |
+
# if item['metric'] > 0.65:
|
| 34 |
+
# pass_num += 1
|
| 35 |
+
# else:
|
| 36 |
+
# if item['metric'] == 1.0:
|
| 37 |
+
# pass_num += 1
|
| 38 |
+
# return pass_num / len(id2maxitem)
|
| 39 |
+
|
| 40 |
+
# model_names = [
|
| 41 |
+
# "Gemini-2.5-Pro",
|
| 42 |
+
# "GPT-5",
|
| 43 |
+
# "Claude-4-Sonnet",
|
| 44 |
+
# "DeepSeek-V3.2",
|
| 45 |
+
# "Qwen3-235B-A22B-Thinking-2507",
|
| 46 |
+
# "GLM-4.6",
|
| 47 |
+
# "Kimi-K2-Instruct-0905",
|
| 48 |
+
# "MiniMax-M2",
|
| 49 |
+
# "Ministral-3-14B-Instruct-2512",
|
| 50 |
+
# "Llama-3.1-405B-Instruct"
|
| 51 |
+
# ]
|
| 52 |
+
|
| 53 |
+
# for model_name in model_names:
|
| 54 |
+
# print(model_name)
|
| 55 |
+
# result_root = "/cpfs/user/chenziyang/LongBenchmark/eval/output/final"
|
| 56 |
+
# file_path = find_file(model_name)
|
| 57 |
+
# data = load_jsonl(file_path)
|
| 58 |
+
|
| 59 |
+
# print("Overall")
|
| 60 |
+
# for n in range(1, 4):
|
| 61 |
+
# print(round(calculate_pass_n(data, n) * 100, 2))
|
| 62 |
+
# print("English")
|
| 63 |
+
# for n in range(1, 4):
|
| 64 |
+
# print(round(calculate_pass_n(data, n, language_filter="English") * 100, 2))
|
| 65 |
+
# print("Chinese")
|
| 66 |
+
# for n in range(1, 4):
|
| 67 |
+
# print(round(calculate_pass_n(data, n, language_filter="Chinese") * 100, 2))
|
| 68 |
+
# print("Extreme")
|
| 69 |
+
# for n in range(1, 4):
|
| 70 |
+
# print(round(calculate_pass_n(data, n, difficulty_filter="Extreme") * 100, 2))
|
| 71 |
+
# print("Hard")
|
| 72 |
+
# for n in range(1, 4):
|
| 73 |
+
# print(round(calculate_pass_n(data, n, difficulty_filter="Hard") * 100, 2))
|
| 74 |
+
# print("Moderate")
|
| 75 |
+
# for n in range(1, 4):
|
| 76 |
+
# print(round(calculate_pass_n(data, n, difficulty_filter="Moderate") * 100, 2))
|
| 77 |
+
# print("Easy")
|
| 78 |
+
# for n in range(1, 4):
|
| 79 |
+
# print(round(calculate_pass_n(data, n, difficulty_filter="Easy") * 100, 2))
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
import matplotlib.pyplot as plt
|
| 84 |
+
import numpy as np
|
| 85 |
+
from matplotlib import font_manager as fm
|
| 86 |
+
from matplotlib.font_manager import FontProperties
|
| 87 |
+
from matplotlib.ticker import FixedLocator
|
| 88 |
+
|
| 89 |
+
# 设置风格
|
| 90 |
+
plt.style.use('seaborn-v0_8-whitegrid')
|
| 91 |
+
font_path = '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf'
|
| 92 |
+
font_bold_path = '/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'
|
| 93 |
+
try:
|
| 94 |
+
fm.fontManager.addfont(font_path)
|
| 95 |
+
fm.fontManager.addfont(font_bold_path)
|
| 96 |
+
except Exception:
|
| 97 |
+
pass
|
| 98 |
+
plt.rcParams['font.family'] = 'Arial'
|
| 99 |
+
plt.rcParams['font.weight'] = 'bold'
|
| 100 |
+
plt.rcParams['axes.labelweight'] = 'bold'
|
| 101 |
+
plt.rcParams['axes.titleweight'] = 'bold'
|
| 102 |
+
|
| 103 |
+
# 模拟数据:每个模型3个数据点
|
| 104 |
+
num_points = 3
|
| 105 |
+
x = np.arange(1, num_points + 1)
|
| 106 |
+
|
| 107 |
+
subplot_names = [
|
| 108 |
+
"Overall",
|
| 109 |
+
"English",
|
| 110 |
+
"Chinese",
|
| 111 |
+
"Extreme",
|
| 112 |
+
"Hard",
|
| 113 |
+
"Moderate",
|
| 114 |
+
"Easy",
|
| 115 |
+
]
|
| 116 |
+
|
| 117 |
+
# 模型样式定义:颜色和标记
|
| 118 |
+
model_style = {
|
| 119 |
+
'Gemini-2.5-Pro': {'color': '#E64B35', 'marker': 'o'},
|
| 120 |
+
'GPT-5': {'color': '#4DBBD5', 'marker': 's'},
|
| 121 |
+
'Claude-4-Sonnet': {'color': '#00A087', 'marker': '^'},
|
| 122 |
+
'DeepSeek-V3.2': {'color': '#F39B7F', 'marker': 'v'},
|
| 123 |
+
'Qwen3-235B-A22B-Thinking-2507': {'color': '#3C5488', 'marker': 'D'},
|
| 124 |
+
'GLM-4.6': {'color': '#91D1C2', 'marker': '<'},
|
| 125 |
+
'Kimi-K2-Instruct-0905': {'color': '#925E9F', 'marker': '>'},
|
| 126 |
+
'MiniMax-M2': {'color': '#8491B4', 'marker': 'p'},
|
| 127 |
+
'Ministral-3-14B-Instruct-2512': {'color': '#7E6148', 'marker': 'h'},
|
| 128 |
+
'Llama-3.1-405B-Instruct': {'color': '#B09C85', 'marker': '*'},
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
data = {
|
| 132 |
+
"Gemini-2.5-Pro": {
|
| 133 |
+
"Overall": [53.67, 61.33, 64.67],
|
| 134 |
+
"English": [53.20, 61.73, 64.13],
|
| 135 |
+
"Chinese": [54.13, 60.93, 65.20],
|
| 136 |
+
"Extreme": [5.45, 8.41, 10.68],
|
| 137 |
+
"Hard": [57.44, 74.05, 82.70],
|
| 138 |
+
"Moderate": [76.47, 85.12, 87.20],
|
| 139 |
+
"Easy": [81.74, 87.76, 89.63],
|
| 140 |
+
},
|
| 141 |
+
"GPT-5": {
|
| 142 |
+
"Overall": [50.33, 57.73, 61.07],
|
| 143 |
+
"English": [50.53, 59.07, 62.93],
|
| 144 |
+
"Chinese": [50.13, 56.40, 59.20],
|
| 145 |
+
"Extreme": [3.41, 5.23, 6.36],
|
| 146 |
+
"Hard": [56.40, 69.55, 74.74],
|
| 147 |
+
"Moderate": [69.20, 78.55, 83.74],
|
| 148 |
+
"Easy": [78.22, 86.10, 89.21],
|
| 149 |
+
},
|
| 150 |
+
"Claude-4-Sonnet": {
|
| 151 |
+
"Overall": [47.87, 55.87, 59.80],
|
| 152 |
+
"English": [48.80, 57.47, 61.33],
|
| 153 |
+
"Chinese": [46.93, 54.27, 58.27],
|
| 154 |
+
"Extreme": [2.50, 3.86, 5.00],
|
| 155 |
+
"Hard": [45.67, 59.52, 69.20],
|
| 156 |
+
"Moderate": [62.28, 77.85, 82.35],
|
| 157 |
+
"Easy": [81.95, 87.97, 90.66],
|
| 158 |
+
},
|
| 159 |
+
"DeepSeek-V3.2": {
|
| 160 |
+
"Overall": [44.20, 52.87, 57.20],
|
| 161 |
+
"English": [45.20, 53.20, 57.47],
|
| 162 |
+
"Chinese": [43.20, 52.53, 56.93],
|
| 163 |
+
"Extreme": [1.82, 2.73, 4.77],
|
| 164 |
+
"Hard": [31.14, 48.44, 56.06],
|
| 165 |
+
"Moderate": [60.55, 75.09, 81.66],
|
| 166 |
+
"Easy": [80.91, 87.97, 91.08],
|
| 167 |
+
},
|
| 168 |
+
"Qwen3-235B-A22B-Thinking-2507": {
|
| 169 |
+
"Overall": [42.67, 52.40, 57.20],
|
| 170 |
+
"English": [43.47, 52.80, 57.73],
|
| 171 |
+
"Chinese": [41.87, 52.00, 56.67],
|
| 172 |
+
"Extreme": [0.91, 1.82, 2.95],
|
| 173 |
+
"Hard": [29.07, 44.98, 54.67],
|
| 174 |
+
"Moderate": [60.21, 73.36, 81.66],
|
| 175 |
+
"Easy": [78.42, 90.46, 93.57],
|
| 176 |
+
},
|
| 177 |
+
"GLM-4.6": {
|
| 178 |
+
"Overall": [36.47, 43.87, 48.13],
|
| 179 |
+
"English": [34.80, 41.07, 46.27],
|
| 180 |
+
"Chinese": [38.13, 46.67, 50.00],
|
| 181 |
+
"Extreme": [2.95, 4.77, 6.59],
|
| 182 |
+
"Hard": [9.69, 15.22, 19.03],
|
| 183 |
+
"Moderate": [47.06, 62.28, 70.24],
|
| 184 |
+
"Easy": [76.76, 85.68, 90.25],
|
| 185 |
+
},
|
| 186 |
+
"Kimi-K2-Instruct-0905": {
|
| 187 |
+
"Overall": [30.73, 37.07, 40.13],
|
| 188 |
+
"English": [34.40, 39.87, 43.20],
|
| 189 |
+
"Chinese": [27.07, 34.27, 37.07],
|
| 190 |
+
"Extreme": [2.50, 4.32, 5.68],
|
| 191 |
+
"Hard": [4.50, 6.57, 8.30],
|
| 192 |
+
"Moderate": [33.91, 45.33, 54.67],
|
| 193 |
+
"Easy": [70.33, 80.29, 81.95],
|
| 194 |
+
},
|
| 195 |
+
"MiniMax-M2": {
|
| 196 |
+
"Overall": [30.13, 38.13, 44.40],
|
| 197 |
+
"English": [31.07, 40.40, 46.67],
|
| 198 |
+
"Chinese": [29.20, 35.87, 42.13],
|
| 199 |
+
"Extreme": [2.73, 4.77, 7.05],
|
| 200 |
+
"Hard": [5.19, 8.65, 13.15],
|
| 201 |
+
"Moderate": [38.06, 53.29, 63.67],
|
| 202 |
+
"Easy": [65.35, 77.18, 85.68],
|
| 203 |
+
},
|
| 204 |
+
"Ministral-3-14B-Instruct-2512": {
|
| 205 |
+
"Overall": [22.73, 28.33, 31.13],
|
| 206 |
+
"English": [25.07, 30.13, 33.47],
|
| 207 |
+
"Chinese": [20.40, 26.53, 28.80],
|
| 208 |
+
"Extreme": [2.05, 2.95, 3.86],
|
| 209 |
+
"Hard": [3.46, 7.96, 8.65],
|
| 210 |
+
"Moderate": [14.53, 20.07, 24.57],
|
| 211 |
+
"Easy": [58.09, 68.67, 73.44],
|
| 212 |
+
},
|
| 213 |
+
"Llama-3.1-405B-Instruct": {
|
| 214 |
+
"Overall": [17.87, 24.00, 27.47],
|
| 215 |
+
"English": [20.13, 26.93, 30.53],
|
| 216 |
+
"Chinese": [15.60, 21.07, 24.40],
|
| 217 |
+
"Extreme": [1.82, 3.41, 3.86],
|
| 218 |
+
"Hard": [1.38, 3.11, 5.88],
|
| 219 |
+
"Moderate": [3.81, 7.96, 10.73],
|
| 220 |
+
"Easy": [50.83, 64.94, 71.99],
|
| 221 |
+
},
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
# 从新的数据结构中提取数据用于绘图
|
| 226 |
+
# 新数据结构: data[model_name][category] = [val1, val2, val3]
|
| 227 |
+
# 需要转换为: data_list[i][model_name] = [val1, val2, val3] 的格式
|
| 228 |
+
def extract_data_for_category(category_name, source=None):
    """Extract one category's series for every model.

    Converts ``source[model][category] = [v1, v2, v3]`` into
    ``{model: [v1, v2, v3]}`` for a single category, skipping models
    that do not report that category.

    Args:
        category_name: Category key, e.g. "Overall", "English", "Easy".
        source: Optional mapping of model name -> {category: values}.
            Defaults to the module-level ``data`` dict, so existing
            single-argument calls behave exactly as before.

    Returns:
        dict mapping model name -> value list for ``category_name``.
    """
    if source is None:
        source = data
    # Dict comprehension replaces the manual loop-and-assign pattern.
    return {
        model_name: metrics[category_name]
        for model_name, metrics in source.items()
        if category_name in metrics
    }
|
| 235 |
+
|
| 236 |
+
# Build data_list from subplot_names: one {model: [p1, p2, p3]} dict per subplot.
data_list = [extract_data_for_category(cat) for cat in subplot_names]

# Subplot layout: 3 on top, 4 on the bottom.
# Implemented as a 2x4 grid: row 1 holds 3 plots plus the legend in slot 4;
# row 2 holds 4 plots.
# figsize keeps each subplot roughly square: 2 rows x 4 cols -> 4:2 = 2:1.
fig = plt.figure(figsize=(16, 8))
gs = fig.add_gridspec(2, 4, width_ratios=[1, 1, 1, 1], height_ratios=[1, 1])  # 2x4 grid

# Collect the 7 plotting axes in reading order (top row first).
axes = []

# Top 3 subplots (slot gs[0, 3] is reserved for the shared legend below).
for i in range(3):
    ax = fig.add_subplot(gs[0, i])
    ax.set_box_aspect(1)  # force a square plotting area
    axes.append(ax)

# Bottom 4 subplots
for i in range(4):
    ax = fig.add_subplot(gs[1, i])
    ax.set_box_aspect(1)  # force a square plotting area
    axes.append(ax)
+
# 计算统一的纵轴范围
|
| 260 |
+
def get_group_ylim(indices, series=None):
    """Compute a shared y-axis range for a group of subplot categories.

    The lower bound is the smallest Pass@1 (index 0) value and the upper
    bound is the largest Pass@3 (index 2) value across the selected
    categories, padded by 15% of the span (or 1.0 when the span is zero,
    so a degenerate range still renders).

    Args:
        indices: Indices into ``series`` selecting which categories to pool.
        series: Optional list of {model: [p1, p2, p3]} dicts. Defaults to
            the module-level ``data_list``, keeping the original
            single-argument call backward compatible.

    Returns:
        Tuple ``(ymin, ymax)`` with padding applied.
    """
    if series is None:
        series = data_list
    group_data = [series[idx] for idx in indices]
    min_vals = []
    max_vals = []
    for d in group_data:
        min_vals.append(min(v[0] for v in d.values()))  # Pass@1 (N=1) values
        max_vals.append(max(v[2] for v in d.values()))  # Pass@3 (N=3) values

    g_min = min(min_vals)
    g_max = max(max_vals)
    y_range = g_max - g_min
    padding = y_range * 0.15 if y_range > 0 else 1.0
    return g_min - padding, g_max + padding
|
| 273 |
+
|
| 274 |
+
# Shared y-limits so related subplots are directly comparable.
group1_ylim = get_group_ylim([0, 1, 2])  # Overall, English, Chinese
group2_ylim = get_group_ylim([4, 5])  # Hard, Moderate

# Draw one Pass@N line chart per subplot.
for i, ax in enumerate(axes):
    current_data = data_list[i]

    # Per-subplot fallback y-range with headroom above and below:
    # the max comes from the N=3 values, the min from the N=1 values.
    min_val = min(v[0] for v in current_data.values())
    max_val = max(v[2] for v in current_data.values())
    y_range = max_val - min_val
    padding = y_range * 0.15 if y_range > 0 else 1.0

    ylim_to_use = (min_val - padding, max_val + padding)
    # Subplots 0-2 and 4-5 override the fallback with their group-wide range.
    if i in [0, 1, 2]:
        ylim_to_use = group1_ylim
    elif i in [4, 5]:
        ylim_to_use = group2_ylim

    # Iterate model_style (not current_data) so line colors/markers and the
    # legend order stay consistent across every subplot.
    for model_name, style in model_style.items():
        if model_name not in current_data:
            continue

        vals = current_data[model_name]

        ax.plot(
            x,
            vals,
            label=model_name,
            color=style['color'],
            marker=style['marker'],
            linewidth=1.8,
            markersize=7,
            markerfacecolor=style['color'],
            markeredgecolor=style['color'],
            markeredgewidth=1.5
        )

    # Annotate the best score at N=3.
    # Find model with highest value at index 2
    best_model = max(current_data.keys(), key=lambda m: current_data[m][2])
    best_val = current_data[best_model][2]
    best_style = model_style[best_model]

    ax.annotate(
        f'{best_val}',
        (x[2], best_val),
        textcoords='offset points',
        xytext=(-5, 5),
        ha='right',
        va='bottom',
        fontsize=11,
        fontweight='bold',
        color=best_style['color'],
        fontfamily='Arial'
    )

    ax.set_xlabel('N (Pass@N)', fontsize=12, labelpad=8, fontweight='bold', fontfamily='Arial')
    ax.set_ylabel('Percentage (%)', fontsize=12, labelpad=8, fontweight='bold', fontfamily='Arial')

    ax.set_ylim(ylim_to_use)

    # Place the subplot title inside the axes, top-left, gray italic.
    font_prop = FontProperties(family='Arial', style='italic', size=14, weight='bold')
    ax.text(0.05, 0.95, subplot_names[i], transform=ax.transAxes,
            fontproperties=font_prop,
            color='gray', verticalalignment='top')

    ax.grid(True, which='both', linestyle='--', linewidth=0.6, color='#d0d6dc')
    for spine in ax.spines.values():
        spine.set_visible(True)
        spine.set_color('black')
        spine.set_linewidth(1.0)

    # Pin the x ticks exactly at the N values.
    ax.xaxis.set_major_locator(FixedLocator(x))
    ax.set_xticklabels(x, fontsize=10)
    ax.tick_params(axis='both', which='both', labelsize=10, direction='out', length=4)

# Place one shared legend in the unused first-row fourth slot.
ax_legend = fig.add_subplot(gs[0, 3])
ax_legend.axis('off')  # hide the axes; the slot only hosts the legend
handles, labels = axes[0].get_legend_handles_labels()

# Center the legend within that subplot area.
legend = ax_legend.legend(
    handles,
    labels,
    loc='center',
    title="Models",
    fontsize=11,
    frameon=True,
    prop={'weight': 'bold', 'family': 'Arial', 'size': 11},
    title_fontsize=14
)
legend.get_title().set_fontweight('bold')
legend.get_frame().set_edgecolor('#b0b0b0')
legend.get_frame().set_linewidth(1.0)

# Color each legend label to match its line color.
for i, text in enumerate(legend.get_texts()):
    model_name = text.get_text()
    text.set_color(model_style[model_name]['color'])

plt.tight_layout()
fig.savefig('/cpfs/user/chenziyang/LongBenchmark/draw/images/passn_line.png', dpi=300, bbox_inches='tight')
plt.close(fig)
|
draw/tools/similarity_scatter_3d.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# from sentence_transformers import SentenceTransformer
|
| 2 |
+
from sklearn.decomposition import PCA
|
| 3 |
+
import matplotlib.pyplot as plt
|
| 4 |
+
from matplotlib import font_manager as fm
|
| 5 |
+
from matplotlib.font_manager import FontProperties
|
| 6 |
+
import json
|
| 7 |
+
import numpy as np
|
| 8 |
+
from tqdm import tqdm
|
| 9 |
+
import os
|
| 10 |
+
|
| 11 |
+
# Font setup: register Arial + Arial Bold and make bold the default weight.
font_path = '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf'
font_bold_path = '/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'
fm.fontManager.addfont(font_path)
fm.fontManager.addfont(font_bold_path)
plt.rcParams['font.family'] = 'Arial'
plt.rcParams['font.weight'] = 'bold'  # bold text globally
plt.rcParams['axes.labelweight'] = 'bold'  # bold axis labels
plt.rcParams['axes.titleweight'] = 'bold'  # bold titles

bold_font = FontProperties(fname=font_bold_path, weight='bold')

# 2. Text data: load the benchmark and keep only the English samples.
data_path = "/cpfs/user/chenziyang/LongBenchmark/eval/dataset/longbench_pro_final.json"
data = json.load(open(data_path, "r", encoding="utf-8"))

en_data = []
for item in data:
    if item["language"] == "English":
        en_data.append(item)
data = en_data
print(len(data))

# Extract text (question + answers) and category labels per sample.
# "primary_task" is presumably "T<k>.<subtask>" — the prefix before the first
# dot is used as the class label (TODO confirm against the dataset schema).
texts = [item["question_nonthinking"] + "\n" + "\n".join(item["answer"]) for item in data]
labels = [item["primary_task"].split(".")[0] for item in data]
languages = [item["language"] for item in data]
label_array = np.array(labels)

# 3. Encode texts -> embeddings (one at a time, with a progress bar);
# results are cached on disk so reruns skip the expensive encoding step.
embeddings_path = "/cpfs/user/chenziyang/LongBenchmark/draw/images/similarity_embeddings_en.npy"
if os.path.exists(embeddings_path):
    embeddings = np.load(embeddings_path)
    print(f"Loaded embeddings from {embeddings_path}")
else:
    # 1. Load the embedding model.
    # NOTE(review): the `from sentence_transformers import SentenceTransformer`
    # import at the top of this file is commented out, so this branch raises
    # NameError whenever the cache file is missing — restore the import before
    # regenerating embeddings.
    model = SentenceTransformer(
        "/cpfs/user/chenziyang/LongBenchmark/eval/model/Qwen3-Embedding-8B",
        tokenizer_kwargs={"padding_side": "left"},
    )
    os.makedirs(os.path.dirname(embeddings_path), exist_ok=True)
    embeddings = []
    for text in tqdm(texts, desc="Encoding texts"):
        embedding = model.encode(text, convert_to_numpy=True)
        embeddings.append(embedding)
    embeddings = np.array(embeddings)
    np.save(embeddings_path, embeddings)
    print(f"Saved embeddings to {embeddings_path}")

print("Embedding shape:", embeddings.shape)

# 4. Compute per-class centroids and drop outliers (points in the farthest
# `100 - outlier_percentile`% of distances from their class centroid).
unique_labels = np.unique(label_array)
class_centers = {}
keep_mask = np.ones(len(embeddings), dtype=bool)
outlier_percentile = 50

for label in unique_labels:
    idx = np.where(label_array == label)[0]
    if len(idx) == 0:
        continue

    class_embeddings = embeddings[idx]
    center = class_embeddings.mean(axis=0)
    class_centers[label] = center

    if len(idx) < 5:
        continue  # too few samples; skip outlier removal for this class

    distances = np.linalg.norm(class_embeddings - center, axis=1)
    threshold = np.percentile(distances, outlier_percentile)
    outliers = idx[distances > threshold]
    keep_mask[outliers] = False

removed_ratio = (~keep_mask).sum() / len(embeddings)
print(f"Removed {(~keep_mask).sum()} outliers ({removed_ratio:.2%}) using {100 - outlier_percentile}% tail cutoff.")
print("Sample class centers (first 3 dims):")
for label in unique_labels:
    preview = class_centers[label][:3] if label in class_centers else []
    print(f" {label}: {preview}")

embeddings_filtered = embeddings[keep_mask]
label_array_filtered = label_array[keep_mask]
print("Filtered embedding shape:", embeddings_filtered.shape)

# 5. Dimensionality reduction: PCA down to 3D only.
pca_3d = PCA(n_components=3)
points_3d = pca_3d.fit_transform(embeddings_filtered)

# 6. Collect the unique labels remaining after filtering.
filtered_unique_labels = list(set(label_array_filtered))
# Sort them in T1, T2, ..., T11 order.
| 103 |
+
def sort_key(label):
    """Sort key ordering "T<number>" labels numerically; others sort last."""
    suffix = label[1:]
    if label.startswith('T') and suffix.isdigit():
        return int(suffix)
    # Labels that are not "T" followed by digits go after every task label.
    return 999
|
| 107 |
+
|
| 108 |
+
filtered_unique_labels = sorted(filtered_unique_labels, key=sort_key)
num_classes = len(filtered_unique_labels)

# 7. Assign a color to each class from the tab20 palette.
colors = plt.cm.tab20(np.linspace(0, 1, num_classes))
label_to_color = {label: colors[i] for i, label in enumerate(filtered_unique_labels)}
# Give T11 a dedicated color so it stands out.
if "T11" in label_to_color:
    label_to_color["T11"] = "#FF6B6B"  # reddish tone

# 8. Create three 2D plane projections of the 3D points: XY, XZ, YZ.
# Compute the actual extent of the projected points.
x_min, x_max = points_3d[:, 0].min(), points_3d[:, 0].max()
y_min, y_max = points_3d[:, 1].min(), points_3d[:, 1].max()
z_min, z_max = points_3d[:, 2].min(), points_3d[:, 2].max()

# Centers and sizes of each axis range.
x_center, y_center, z_center = (x_min + x_max) / 2, (y_min + y_max) / 2, (z_min + z_max) / 2
x_range, y_range, z_range = x_max - x_min, y_max - y_min, z_max - z_min

# Use the largest extent for every axis to keep equal aspect, plus a 5% margin.
max_range = max(x_range, y_range, z_range) * 1.05
half_range = max_range / 2

# Figure: 1 row x 3 columns, projections side by side.
fig = plt.figure(figsize=(16, 6))
axes = []

# (name, first PCA dim, second PCA dim, x label, y label) per projection.
projections = [
    ("XY", 0, 1, "X", "Y"),  # XY plane
    ("XZ", 0, 2, "X", "Z"),  # XZ plane
    ("YZ", 1, 2, "Y", "Z"),  # YZ plane
]

# Create the three subplots.
for i, (plane_name, dim1, dim2, label1, label2) in enumerate(projections, 1):
    ax = fig.add_subplot(1, 3, i)
    axes.append(ax)

    # One scatter call per class so each gets its own color and legend entry.
    for label in filtered_unique_labels:
        mask = label_array_filtered == label
        ax.scatter(
            points_3d[mask, dim1],
            points_3d[mask, dim2],
            s=10,
            c=[label_to_color[label]],
            alpha=0.7,
            label=label,
        )

    # Axis limits: same half_range on every axis for a 1:1 aspect.
    if dim1 == 0:  # X axis
        ax.set_xlim(x_center - half_range, x_center + half_range)
    elif dim1 == 1:  # Y axis
        ax.set_xlim(y_center - half_range, y_center + half_range)
    else:  # Z axis
        ax.set_xlim(z_center - half_range, z_center + half_range)

    if dim2 == 0:  # X axis
        ax.set_ylim(x_center - half_range, x_center + half_range)
    elif dim2 == 1:  # Y axis
        ax.set_ylim(y_center - half_range, y_center + half_range)
    else:  # Z axis
        ax.set_ylim(z_center - half_range, z_center + half_range)

    ax.set_xlabel(label1, fontsize=12, fontweight='bold', fontfamily='Arial')
    ax.set_ylabel(label2, fontsize=12, fontweight='bold', fontfamily='Arial')
    ax.tick_params(axis='both', which='both', labelsize=10, direction='out', length=4)
    ax.grid(True, alpha=0.3, linestyle='--')

    # Per-subplot title naming the projection plane.
    ax.set_title(f"{plane_name} Plane", fontsize=12, pad=10, fontweight='bold', fontfamily='Arial')

# Build a shared legend from proxy marker handles (one per class).
legend_handles = [
    plt.Line2D(
        [0],
        [0],
        marker="o",
        color="w",
        label=label,
        markerfacecolor=label_to_color[label],
        markersize=8,
    )
    for label in filtered_unique_labels
]

legend = fig.legend(
    handles=legend_handles,
    loc="center left",
    bbox_to_anchor=(0.06, 0.5),
    ncol=1,
    fontsize=10,
    prop={'weight': 'bold', 'family': 'Arial'}
)
legend.get_frame().set_edgecolor('#b0b0b0')
legend.get_frame().set_linewidth(1.0)
# Leave the left 12% of the figure free for the legend.
plt.tight_layout(rect=(0.12, 0, 1, 1))
plt.subplots_adjust(wspace=0.4)
plt.savefig(
    "/cpfs/user/chenziyang/LongBenchmark/draw/images/similarity_scatter_3d_en.png",
    dpi=400,
    bbox_inches="tight",
)
plt.close()
|
draw/tools/strategy_dimension.py
ADDED
|
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import matplotlib.pyplot as plt
|
| 3 |
+
from matplotlib import font_manager as fm
|
| 4 |
+
from matplotlib.font_manager import FontProperties
|
| 5 |
+
from matplotlib.legend_handler import HandlerPatch
|
| 6 |
+
from matplotlib.patches import Rectangle
|
| 7 |
+
|
| 8 |
+
plt.style.use('seaborn-v0_8-whitegrid')
# Load the Arial and Arial Bold fonts
font_path = '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf'
font_bold_path = '/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'
fm.fontManager.addfont(font_path)
fm.fontManager.addfont(font_bold_path)
plt.rcParams['font.family'] = 'Arial'
plt.rcParams['font.weight'] = 'bold'  # bold text globally
plt.rcParams['axes.labelweight'] = 'bold'  # bold axis labels
plt.rcParams['axes.titleweight'] = 'bold'  # bold titles

# Bold font properties backed directly by the Arial Bold font file.
bold_font = FontProperties(fname=font_bold_path, weight='bold')
|
| 21 |
+
|
| 22 |
+
def draw_radar(labels, data, colors=None, title=None, figsize=(8, 6), fill_alpha=0.25,
               group_names=None, highlight_range=True):
    """
    Draw an N-dimensional radar chart for multiple data groups and save it
    to draw/images/strategy_dimension.png.

    labels : list of str, length = N (one label per radar axis)
    data : list of lists, each inner list of length N (one list per group)
    colors : list of color strings, len >= number of groups (optional)
    title : optional chart title
    figsize : figure size in inches
    fill_alpha : opacity of each group's filled polygon
    group_names : list of str, legend labels for the groups (optional)
    highlight_range : bool, shrink the radial range to emphasize differences
    """

    num_vars = len(labels)
    # Axis angles (closed polygon) — chart starts at the top (pi/2), clockwise.
    angles = np.linspace(0, 2*np.pi, num_vars, endpoint=False).tolist()
    angles += angles[:1]

    # Canvas and polar axes.
    fig, ax = plt.subplots(figsize=figsize, subplot_kw=dict(polar=True),
                           facecolor='white', edgecolor='none')
    # Put the first axis at the top (matplotlib's default starts at the right).
    ax.set_theta_offset(np.pi / 2)
    ax.set_theta_direction(-1)

    # Pool all values to derive the radial range.
    all_vals = np.concatenate([np.array(g) for g in data])
    data_min = all_vals.min()
    data_max = all_vals.max()
    data_range = data_max - data_min

    # When highlight_range is on, tighten the radial range (10% margin,
    # clamped to [0, 1]) so small differences between groups are visible.
    if highlight_range and data_range > 0:
        # 10% margin around the data extent.
        margin = data_range * 0.1
        vmin = max(0, data_min - margin)
        vmax_for_ticks = min(1.0, data_max + margin)
    else:
        vmin = max(0, np.floor(data_min * 10) / 10 - 0.1)
        vmax_for_ticks = min(1.0, np.ceil(data_max * 10) / 10 + 0.1)

    # Radial ticks (based on vmax_for_ticks, not the final y-limit).
    num_rings = 6  # number of concentric rings
    yticks = np.linspace(vmin, vmax_for_ticks, num_rings)
    ax.set_yticks(yticks)
    ytick_labels = ax.set_yticklabels([f"{y:.2f}" for y in yticks], fontsize=10, fontweight='bold')
    # Force radial tick labels to render bold.
    for label in ax.get_yticklabels():
        label.set_fontweight('bold')
        label.set_weight('bold')  # belt-and-suspenders
        label.set_fontproperties(bold_font)  # FontProperties guarantees bold rendering
    # Leave a little headroom above the outermost tick (ticks unchanged).
    vmax_actual = min(1.02, vmax_for_ticks + 0.02)
    ax.set_ylim(vmin, vmax_actual)

    # Per-axis labels (set early so they end up above the gridlines).
    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(labels, fontsize=12, fontweight='bold')
    # Push the axis labels outward, away from the plot area.
    ax.tick_params(axis='x', pad=25, labelsize=10)
    # Re-apply bold: tick_params may have reset the earlier settings.
    for label in ax.get_xticklabels():
        label.set_fontweight('bold')
        label.set_weight('bold')  # belt-and-suspenders
        label.set_fontproperties(bold_font)  # FontProperties guarantees bold rendering

    # Gridline styling (mirrors strategy_time.py, adapted to polar axes).
    # set_axisbelow keeps gridlines underneath the data.
    ax.set_axisbelow(True)
    ax.yaxis.grid(True, linestyle='--', linewidth=0.6, color='#d0d6dc')
    ax.xaxis.grid(True, linestyle='--', linewidth=0.6, color='#d0d6dc')

    # Manually push all gridlines (including the outermost circle) behind the data.
    for line in ax.yaxis.get_gridlines():
        line.set_zorder(0)
    for line in ax.xaxis.get_gridlines():
        line.set_zorder(0)

    # Keep the polar spines at a low zorder as well.
    for spine in ax.spines.values():
        spine.set_zorder(0)

    # Plot background.
    ax.set_facecolor('white')

    # Keep tick labels above the gridlines (after the layering setup above).
    for label in ax.get_xticklabels():
        label.set_zorder(10)
        label.set_fontweight('bold')
        label.set_weight('bold')  # belt-and-suspenders
        label.set_fontproperties(bold_font)  # FontProperties guarantees bold rendering
    for label in ax.get_yticklabels():
        label.set_zorder(10)
        label.set_fontweight('bold')
        label.set_weight('bold')  # belt-and-suspenders
        label.set_fontproperties(bold_font)  # FontProperties guarantees bold rendering

    # Default palette (mirrors strategy_time.py).
    default_colors = ['#E3B505', '#007C91', '#9E9E9E']
    if colors is None:
        colors = default_colors

    # Draw each data group: a translucent fill plus a solid outline.
    line_styles = ['-', '-', '-', '-', '-']  # solid lines for every group

    for idx, d in enumerate(data):
        vals = np.array(d + d[:1])  # close the polygon by repeating point 0
        color = colors[idx % len(colors)]
        line_style = line_styles[idx % len(line_styles)]

        # Filled region (zorder 1: above grid, below the outline).
        ax.fill(angles, vals, color=color, alpha=fill_alpha,
                edgecolor='none', zorder=1)

        # Outline (line width mirrors strategy_time.py).
        ax.plot(angles, vals, color=color, linewidth=1.5,
                linestyle=line_style, zorder=2)

    # Title and legend (styles mirror strategy_time.py).
    if title:
        title_obj = plt.title(title, y=1.12, fontsize=12, pad=20, fontweight='bold', fontproperties=bold_font)
        title_obj.set_weight('bold')  # belt-and-suspenders bold title
        title_obj.set_fontproperties(bold_font)  # FontProperties guarantees bold rendering

    # Legend: use color patches instead of line samples.
    def make_legend_patch(color):
        return Rectangle((0, 0), 1, 1, facecolor=color, edgecolor='none')

    # Build (patch, label) legend entries, one per group.
    legend_elements = []
    for idx in range(len(data)):
        color = colors[idx % len(colors)]
        label = group_names[idx] if group_names and idx < len(group_names) else f'Group {idx+1}'
        patch = make_legend_patch(color)
        legend_elements.append((patch, label))

    font_prop = FontProperties(weight='bold')
    legend = ax.legend([elem[0] for elem in legend_elements],
                       [elem[1] for elem in legend_elements],
                       loc='upper right', bbox_to_anchor=(1.25, 1.05),
                       frameon=True, fontsize=10, prop=font_prop)
    legend.get_frame().set_edgecolor('#b0b0b0')
    legend.get_frame().set_linewidth(1.0)

    # After plotting the data, push gridlines and spines to the back once more.
    for line in ax.yaxis.get_gridlines():
        line.set_zorder(0)
    for line in ax.xaxis.get_gridlines():
        line.set_zorder(0)
    for spine in ax.spines.values():
        spine.set_zorder(0)

    # Handle circular grid patches on polar axes (without touching data fills).
    for patch in ax.patches:
        # Only adjust patches with zorder 0 or unset (likely grid-related);
        # the data fills carry zorder 1 or 2 and must not be changed.
        if patch.get_zorder() == 0 or patch.get_zorder() is None:
            patch.set_zorder(0)

    # Re-assert label layering (drawing the data may have changed it).
    for label in ax.get_xticklabels():
        label.set_zorder(10)
        label.set_fontweight('bold')
        label.set_weight('bold')  # belt-and-suspenders
        label.set_fontproperties(bold_font)  # FontProperties guarantees bold rendering
    for label in ax.get_yticklabels():
        label.set_zorder(10)
        label.set_fontweight('bold')
        label.set_weight('bold')  # belt-and-suspenders
        label.set_fontproperties(bold_font)  # FontProperties guarantees bold rendering

    plt.tight_layout()
    fig.savefig('/cpfs/user/chenziyang/LongBenchmark/draw/images/strategy_dimension.png',
                dpi=300, bbox_inches='tight', facecolor='white', edgecolor='none')
    plt.close(fig)
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
# ========== Example usage ==========
labels = ['Answer Correctness', 'Task Alignment', 'Context Requirement Alignment', 'Difficulty', 'Authenticity']
# Three example groups (each of length 5, matching the labels above).
group1 = [0.88, 1.00, 0.99, 0.87, 0.99]  # human-only annotation
group2 = [0.95, 1.00, 1.00, 0.87, 1.00]  # human + model annotation
group3 = [0.92, 1.00, 0.99, 0.57, 1.00]  # model-only annotation

draw_radar(labels, [group1, group2, group3],
           colors=['#51a0dd', '#d14141', '#39c0b7'],  # palette mirrors strategy_time.py
           group_names=['Human-only', 'Human-Model', 'Model-only'],
           figsize=(8, 6),
           fill_alpha=0.05,
           highlight_range=True)
|
draw/tools/strategy_time.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import matplotlib.pyplot as plt
|
| 2 |
+
from matplotlib.ticker import FixedLocator
|
| 3 |
+
from matplotlib import font_manager as fm
|
| 4 |
+
from matplotlib.font_manager import FontProperties
|
| 5 |
+
|
| 6 |
+
# Line chart of mean annotation time per sample vs. sample length, for three
# annotation strategies; saved to draw/images/strategy_time.png.
plt.style.use('seaborn-v0_8-whitegrid')
font_path = '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf'
font_bold_path = '/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'
fm.fontManager.addfont(font_path)
fm.fontManager.addfont(font_bold_path)
plt.rcParams['font.family'] = 'Arial'
plt.rcParams['font.weight'] = 'bold'
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titleweight'] = 'bold'

bold_font = FontProperties(fname=font_bold_path, weight='bold')

colors = ['#51a0dd', '#d14141', '#39c0b7']  # ['#2b6a99', '#f16c23', '#1b7c3d']
labels = ['Human-only', 'Human-Model', 'Model-only']
markers = ['o', 's', '^']
x = ["8k", "16k", "32k", "64k", "128k", "256k"]
# Evenly spaced numeric positions; the string lengths become tick labels.
x_positions = [i * 1.6 for i in range(len(x))]
# Minutes per sample, one row per strategy (same order as `labels`).
y_values = [
    [48, 54, 66, 78, 90, 108],
    [36, 39, 42, 45, 48, 60],
    [18, 18, 18, 18, 24, 30],
]

fig, ax = plt.subplots(figsize=(8, 6))

for y_series, color, label, marker in zip(y_values, colors, labels, markers):
    ax.plot(
        x_positions,
        y_series,
        label=label,
        color=color,
        linewidth=1.8,
        marker=marker,
        markersize=7,
        markerfacecolor=color,
        markeredgecolor=color,
        markeredgewidth=1.5,
    )
    # Label every data point with its value, slightly above the marker.
    for x_pos, y_val in zip(x_positions, y_series):
        ax.annotate(
            f'{y_val}',
            (x_pos, y_val),
            textcoords='offset points',
            xytext=(0, 8),
            ha='center',
            fontsize=10,
            color=color,
            weight='bold',
        )

# Shade the band between the 'Human-only' and 'Human-Model' lines.
ax.fill_between(
    x_positions,
    y_values[1],  # Human-Model (lower bound)
    y_values[0],  # Human-only (upper bound)
    alpha=0.2,
    color='#B0B0B0',
    zorder=0,  # keep the shading below the lines
)

ax.grid(True, which='both', linestyle='--', linewidth=0.6, color='#d0d6dc')
for spine in ax.spines.values():
    spine.set_visible(True)
    spine.set_color('black')
    spine.set_linewidth(1.0)

ax.set_xlabel('Sample Length', fontsize=12, labelpad=12, fontweight='bold', fontfamily='Arial')
ax.set_ylabel('Mean Time per Sample (min)', fontsize=12, labelpad=12, fontweight='bold', fontfamily='Arial')

# Pin x ticks at the numeric positions and label them with the length strings.
ax.xaxis.set_major_locator(FixedLocator(x_positions))
ax.set_xticklabels(x, fontsize=10)
ax.tick_params(axis='both', which='both', labelsize=10, direction='out', length=4)

legend = ax.legend(frameon=True, fontsize=10, loc='upper left', prop={'weight': 'bold', 'family': 'Arial'})
legend.get_frame().set_edgecolor('#b0b0b0')
legend.get_frame().set_linewidth(1.0)

plt.tight_layout()
fig.savefig('/cpfs/user/chenziyang/LongBenchmark/draw/images/strategy_time.png', dpi=300, bbox_inches='tight')
plt.close(fig)
|
draw/tools/task_alluvial_diagram.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import plotly.graph_objects as go

# Sankey / alluvial diagram: which legacy long-context benchmarks (left
# column) contribute to each of our eleven task categories (right column).
old_benchmarks = ["∞BENCH", "RULER", "CLongEval", "LongBenchv2", "HELMET"]
new_tasks = ["T1", "T2", "T3", "T4", "T5",
             "T6", "T7", "T8", "T9", "T10", "T11"]

# Links are grouped by task number so later tasks are drawn over earlier
# ones.  Every link carries weight 1, so a repeated (benchmark, task) pair
# deliberately doubles the width of that flow.
links = [
    # T1 (bottom layer)
    ("∞BENCH", "T1"), ("RULER", "T1"), ("CLongEval", "T1"), ("HELMET", "T1"),
    # T2
    ("HELMET", "T2"),
    # T3 (LongBenchv2 and HELMET counted twice — see note above)
    ("∞BENCH", "T3"), ("RULER", "T3"), ("CLongEval", "T3"), ("LongBenchv2", "T3"), ("LongBenchv2", "T3"), ("HELMET", "T3"), ("HELMET", "T3"),
    # T4
    ("∞BENCH", "T4"), ("CLongEval", "T4"), ("HELMET", "T4"),
    # T5
    ("HELMET", "T5"),
    # T6
    ("RULER", "T6"),
    # T7
    ("CLongEval", "T7"),
    # T8
    ("∞BENCH", "T8"), ("CLongEval", "T8"), ("LongBenchv2", "T8"),
    # T9
    ("∞BENCH", "T9"), ("LongBenchv2", "T9"),
    # T10
    ("CLongEval", "T10"), ("LongBenchv2", "T10"), ("HELMET", "T10"),
    # T11 (top layer)
    ("∞BENCH", "T11"), ("RULER", "T11"), ("CLongEval", "T11"), ("LongBenchv2", "T11"),
]

all_labels = old_benchmarks + new_tasks

# Precomputed lookup tables avoid a linear list.index() scan per link.
node_index = {name: idx for idx, name in enumerate(all_labels)}
task_index = {task: idx for idx, task in enumerate(new_tasks)}

source_indices = [node_index[src] for src, _ in links]
target_indices = [node_index[dst] for _, dst in links]
values = [1 for _ in links]

benchmark_colors = [
    "#FF6B4A", "#27BFA9", "#FFC533", "#A06BFF", "#4AA6FF"
]
task_colors = [
    "#C6E2FF", "#FFD6E8", "#C8F7DC", "#FFF7AE", "#FFE3B3",
    "#E4D1FF", "#B9F3FF", "#E7E7E7", "#D0F0EF", "#F6E7C1", "#DAF5C4"
]
node_colors = benchmark_colors + task_colors

# Each flow inherits the colour of the task it feeds into.
link_colors = [task_colors[task_index[dst]] for _, dst in links]

fig = go.Figure(data=[go.Sankey(
    node=dict(
        pad=20,
        thickness=30,
        line=dict(color="white", width=0),
        label=all_labels,
        color=node_colors
    ),
    link=dict(
        source=source_indices,
        target=target_indices,
        value=values,
        color=link_colors
    )
)])

# Global layout: white background, bold large font (only relevant for text
# that remains once the node labels are blanked out below).
fig.update_layout(
    font_size=20,
    font=dict(family="Arial Black", size=20, color="black"),
    plot_bgcolor='white',
    paper_bgcolor='white'
)

# Hide every node label; the figure is annotated externally.
fig.update_traces(
    node=dict(
        label=[""] * len(all_labels)
    )
)

# Render a standalone HTML file (plotly.js inlined, mode bar hidden).
html_content = fig.to_html(include_plotlyjs=True, config={'displayModeBar': False})

with open("/cpfs/user/chenziyang/LongBenchmark/draw/images/alluvial_diagram.html", "w", encoding="utf-8") as f:
    f.write(html_content)

print("✅ 已保存为 alluvial_diagram.html")
|
| 90 |
+
|
| 91 |
+
|
draw/tools/task_distrubution.py
ADDED
|
@@ -0,0 +1,276 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from collections import defaultdict, Counter
|
| 3 |
+
import random
|
| 4 |
+
random.seed(2025)
|
| 5 |
+
|
| 6 |
+
def load_json(file_path):
    """Read a UTF-8 encoded JSON file and return the parsed object."""
    with open(file_path, 'r', encoding='utf-8') as handle:
        return json.load(handle)
|
| 9 |
+
|
| 10 |
+
# Primary task categories (T1..T11) and their secondary sub-tasks.
cate2subcates = {
    "T1": ["T1.1", "T1.2"],
    "T2": ["T2.1", "T2.2"],
    "T3": ["T3.1", "T3.2"],
    "T4": ["T4.1", "T4.2"],
    "T5": ["T5.1", "T5.2"],
    "T6": ["T6.1", "T6.2", "T6.3"],
    "T7": ["T7.1", "T7.2", "T7.3"],
    "T8": ["T8.1", "T8.2", "T8.3"],
    "T9": ["T9.1", "T9.2"],
    "T10": ["T10.1", "T10.2"],
    "T11": ["T11.1", "T11.2"],
}

# Reverse lookup: sub-task -> parent category.
subcate2cate = {
    sub_cate: cate
    for cate, sub_cates in cate2subcates.items()
    for sub_cate in sub_cates
}
|
| 64 |
+
|
| 65 |
+
# Load the evaluation samples and tally how many fall into each primary and
# secondary task.
eval_datas_path = "/cpfs/user/chenziyang/LongBenchmark/eval/dataset/longbench_pro.json"
eval_datas = load_json(eval_datas_path)
# Pre-seed every known (sub-)category with 0 so empty ones still appear.
cate_counter = Counter({cate: 0 for cate in cate2subcates})
sub_cate_counter = Counter({sub_cate: 0 for sub_cate in subcate2cate})
for eval_data in eval_datas:
    # assumes primary_task looks like "T3.1" (prefix before '.') and
    # secondary_task like "T3.1 description" (token before first space)
    # — TODO confirm against the dataset schema
    cate_counter[eval_data['primary_task'].split('.')[0]] += 1
    sub_cate_counter[eval_data['secondary_task'].split(' ')[0]] += 1
total_cate = sum(cate_counter.values())
total_sub = sum(sub_cate_counter.values())
save_path = "/cpfs/user/chenziyang/LongBenchmark/draw/images/task_distrubution.png"
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
# 创建旭日图
|
| 79 |
+
import matplotlib.pyplot as plt
|
| 80 |
+
import numpy as np
|
| 81 |
+
from matplotlib.colors import LinearSegmentedColormap
|
| 82 |
+
|
| 83 |
+
startangle = 0  # both pie rings start at angle 0 (3 o'clock)

# Fixed fill colour for each primary-category wedge.
main_colors = {
    "T1": "#C6E2FF",
    "T2": "#FFD6E8",
    "T3": "#C8F7DC",
    "T4": "#FFF7AE",
    "T5": "#FFE3B3",
    "T6": "#E4D1FF",
    "T7": "#B9F3FF",
    "T8": "#E7E7E7",
    "T9": "#D0F0EF",
    "T10": "#F6E7C1",
    "T11": "#DAF5C4",
}

def get_subcategory_colors(cate, num_subcates):
    """Return ``num_subcates`` RGBA tuples derived from the parent colour.

    All tuples share the parent's RGB channels; the alpha channel starts at
    0.9 and decreases evenly so sibling sub-tasks remain distinguishable.
    """
    hex_color = main_colors[cate]
    # Hex "#RRGGBB" -> normalised floats in [0, 1].
    red = int(hex_color[1:3], 16) / 255
    green = int(hex_color[3:5], 16) / 255
    blue = int(hex_color[5:7], 16) / 255

    return [
        (red, green, blue, 0.9 - (step * 0.8 / num_subcates))
        for step in range(num_subcates)
    ]
|
| 113 |
+
|
| 114 |
+
# Colour lookup: one colour per primary category, in counter order.
colors = [main_colors[cate] for cate in cate_counter.keys()]

# Create the figure; one large square Axes hosts both pie rings.
fig, ax = plt.subplots(figsize=(36, 36))  # moderate canvas size
|
| 119 |
+
|
| 120 |
+
# Helper utilities, defined early so both rings can use them
|
| 121 |
+
|
| 122 |
+
def get_text_width(text, fontsize):
    """Rough estimate of rendered text width (0.6 is an empirical factor)."""
    estimated_width = len(text) * fontsize * 0.6
    return estimated_width
|
| 125 |
+
|
| 126 |
+
def get_arc_length(radius, angle_span):
    """Length of a circular arc spanning ``angle_span`` degrees at ``radius``."""
    return 2 * np.pi * radius * (angle_span / 360)
|
| 129 |
+
|
| 130 |
+
def wrap_text(text, max_width, fontsize):
    """Wrap ``text`` two words per line for the sunburst labels.

    A word longer than 12 characters is placed on a line of its own.
    ``max_width`` and ``fontsize`` are accepted for API compatibility but
    are not used by the current heuristic.
    """
    lines = []
    pending = []
    for word in text.split():
        if len(word) > 12:
            # Flush whatever is pending, then isolate the long word.
            if pending:
                lines.append(' '.join(pending))
                pending = []
            lines.append(word)
            continue
        pending.append(word)
        if len(pending) == 2:
            lines.append(' '.join(pending))
            pending = []
    if pending:
        lines.append(' '.join(pending))
    return '\n'.join(lines)
|
| 150 |
+
|
| 151 |
+
# Draw the inner ring (primary categories).
wedges, texts = ax.pie(cate_counter.values(),
                       labels=None,  # default labels removed; drawn manually below
                       colors=colors,
                       radius=0.45,  # outer radius of the inner ring
                       startangle=startangle,
                       wedgeprops=dict(width=0.25, edgecolor='white', linewidth=6))  # ring width + white separators

# Place primary-category labels near the middle of the inner ring.
inner_label_text_radius = 0.3
for i, (wedge, cate) in enumerate(zip(wedges, cate_counter.keys())):
    ang = (wedge.theta2 + wedge.theta1) / 2  # angular centre of this wedge
    x = inner_label_text_radius * np.cos(np.deg2rad(ang))
    y = inner_label_text_radius * np.sin(np.deg2rad(ang))
    # Mid-line radius of the inner ring; labels must fit ~80% of this arc.
    inner_label_radius = 0.45 - 0.25 / 2
    angle_span = wedge.theta2 - wedge.theta1
    available_arc_length = get_arc_length(inner_label_radius, angle_span)
    max_fontsize = 22  # prefer the larger font
    min_fontsize = 20
    fontsize = max_fontsize
    # Shrink the font until the estimated text width fits the arc.
    while fontsize > min_fontsize:
        text_width = get_text_width(cate, fontsize)
        if text_width < available_arc_length * 0.8:
            break
        fontsize -= 1
    wrapped_cate = wrap_text(cate, available_arc_length * 0.8, fontsize)

    # Share of all samples held by this category.
    count = cate_counter[cate]
    percentage = (count / total_cate * 100) if total_cate else 0

    # Category name slightly above centre...
    ax.text(x, y + 0.02, wrapped_cate, ha='center', va='center', fontsize=fontsize,
            weight='bold')
    # ...and "count (share%)" slightly below, in a smaller font.
    ax.text(x, y - 0.02, f'{count:,} ({percentage:.1f}%)', ha='center', va='center',
            fontsize=fontsize-6, weight='bold')
|
| 189 |
+
|
| 190 |
+
# Build the outer-ring (sub-category) series, grouped under each parent
# category so sub-wedges sit radially outside their parent wedge.
sub_cate_data = []
sub_cate_labels = []
sub_cate_colors = []

for cate in cate_counter:
    # All sub-categories declared for this primary category.
    sub_cates = cate2subcates[cate]
    # Keep only sub-categories that have a counter entry.
    valid_sub_cates = [(sub_cate, sub_cate_counter[sub_cate])
                       for sub_cate in sub_cates
                       if sub_cate in sub_cate_counter]

    if valid_sub_cates:
        # Shades of the parent colour, one per sub-category.
        sub_colors = get_subcategory_colors(cate, len(valid_sub_cates))

        for (sub_cate, sub_count), color in zip(valid_sub_cates, sub_colors):
            sub_cate_data.append(sub_count)
            sub_cate_labels.append(sub_cate)
            sub_cate_colors.append(color)
|
| 212 |
+
|
| 213 |
+
# Draw the outer ring (sub-categories).
outer_ring_radius = 0.65
outer_ring_width = 0.2

wedges2, texts2 = ax.pie(sub_cate_data,
                         labels=None,  # default labels removed; drawn manually below
                         colors=sub_cate_colors,
                         radius=outer_ring_radius,
                         startangle=startangle,  # aligned with the inner ring
                         wedgeprops=dict(width=outer_ring_width, edgecolor='white', linewidth=6))  # ring width + white separators

# Sub-category labels placed along the outer ring's mid-line.
for i, (wedge, label) in enumerate(zip(wedges2, sub_cate_labels)):
    ang = (wedge.theta2 + wedge.theta1) / 2  # angular centre of this wedge
    angle_span = wedge.theta2 - wedge.theta1  # wedge span in degrees

    # Label position: mid-line of the outer ring.
    outer_label_radius = outer_ring_radius - outer_ring_width / 2
    x = outer_label_radius * np.cos(np.deg2rad(ang))
    y = outer_label_radius * np.sin(np.deg2rad(ang))

    # Flip text on the left half of the circle so it stays readable.
    rotation = ang
    if 90 <= ang <= 270:
        rotation = ang + 180

    # Arc length available for this label.
    available_arc_length = get_arc_length(outer_label_radius, angle_span)

    # Shrink from max to min font size until the label fits the arc.
    # NOTE(review): the resulting ``fontsize`` is only fed to wrap_text();
    # the ax.text call below hard-codes fontsize=14 — confirm intended.
    max_fontsize = 20
    min_fontsize = 18
    fontsize = max_fontsize

    while fontsize > min_fontsize:
        text_width = get_text_width(label, fontsize)
        if text_width < available_arc_length * 0.8:  # keep a 20% margin
            break
        fontsize -= 1

    # Wrap long labels onto multiple lines.
    wrapped_label = wrap_text(label, available_arc_length * 0.8, fontsize)

    # Label text: "name (count, share%)", rotated along the ring.
    count = sub_cate_counter[label]
    percentage = (count / total_sub * 100) if total_sub else 0
    ax.text(x, y, f'{wrapped_label} ({count:,}, {percentage:.1f}%)',
            ha='center', va='center',
            fontsize=14,
            rotation=rotation,
            rotation_mode='anchor',
            weight='bold',
            color='#444444')

# Save the chart at high resolution on a white background.
plt.savefig(save_path,
            bbox_inches='tight',
            dpi=600,  # higher DPI for the saved file
            format='png',
            transparent=False,
            facecolor='white',
            edgecolor='none')
plt.close()
|
| 276 |
+
|
draw/tools/task_radar.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import matplotlib.pyplot as plt
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import numpy as np
|
| 4 |
+
from matplotlib import font_manager as fm
|
| 5 |
+
from math import pi
|
| 6 |
+
|
| 7 |
+
# Data: per-task (T1–T11) scores for each evaluated model.
data = {
    "Gemini-2.5-Pro": {
        "T1": 90.09, "T2": 92.42, "T3": 65.0, "T4": 54.3, "T5": 84.28,
        "T6": 70.4, "T7": 62.75, "T8": 78.24, "T9": 87.35, "T10": 68.36, "T11": 58.89
    },
    "GPT-5": {
        "T1": 90.32, "T2": 90.75, "T3": 66.67, "T4": 52.56, "T5": 81.17,
        "T6": 67.16, "T7": 63.12, "T8": 79.8, "T9": 81.84, "T10": 68.02, "T11": 61.11
    },
    "Claude-4-Sonnet": {
        "T1": 90.77, "T2": 88.9, "T3": 66.11, "T4": 53.84, "T5": 78.6,
        "T6": 66.71, "T7": 55.18, "T8": 68.6, "T9": 85.76, "T10": 64.81, "T11": 58.89
    },
    "DeepSeek-V3.2": {
        "T1": 86.28, "T2": 86.34, "T3": 62.78, "T4": 56.46, "T5": 73.68,
        "T6": 61.69, "T7": 54.31, "T8": 66.4, "T9": 78.21, "T10": 68.19, "T11": 62.22
    },
    "Qwen3-235B-A22B-Thinking-2507": {
        "T1": 87.73, "T2": 86.24, "T3": 62.5, "T4": 54.02, "T5": 77.28,
        "T6": 64.09, "T7": 46.41, "T8": 71.17, "T9": 78.94, "T10": 65.48, "T11": 52.5
    },
    "GLM-4.6": {
        "T1": 81.97, "T2": 80.07, "T3": 53.89, "T4": 54.09, "T5": 53.37,
        "T6": 53.98, "T7": 38.05, "T8": 61.23, "T9": 67.55, "T10": 60.13, "T11": 46.67
    },
    "Kimi-K2-Instruct-0905": {
        "T1": 75.84, "T2": 74.24, "T3": 48.61, "T4": 50.12, "T5": 61.98,
        "T6": 51.65, "T7": 35.48, "T8": 59.63, "T9": 62.99, "T10": 56.07, "T11": 44.17
    },
    "MiniMax-M2": {
        "T1": 76.76, "T2": 71.87, "T3": 49.72, "T4": 46.97, "T5": 54.34,
        "T6": 51.23, "T7": 31.38, "T8": 60.39, "T9": 56.19, "T10": 55.3, "T11": 39.44
    },
    "Ministral-3-14B-Instruct-2512": {
        "T1": 76.08, "T2": 69.76, "T3": 41.39, "T4": 49.94, "T5": 34.61,
        "T6": 45.81, "T7": 22.8, "T8": 41.84, "T9": 53.55, "T10": 46.2, "T11": 35.28
    },
    "Llama-3.1-405B-Instruct": {
        "T1": 66.43, "T2": 66.09, "T3": 30.0, "T4": 46.99, "T5": 35.42,
        "T6": 44.52, "T7": 21.46, "T8": 36.87, "T9": 43.51, "T10": 37.76, "T11": 27.78
    }
}

# Per-task averages; drawn on every subplot as a dashed reference line.
average_data = {
    "T1": 82.23, "T2": 80.67, "T3": 54.67, "T4": 51.93, "T5": 63.47,
    "T6": 57.72, "T7": 43.09, "T8": 62.42, "T9": 69.59, "T10": 59.03, "T11": 48.70
}

# One fixed colour per model, keyed by the model names used in ``data``.
model_colors = {
    'Gemini-2.5-Pro': '#E64B35',
    'GPT-5': '#4DBBD5',
    'Claude-4-Sonnet': '#00A087',
    'DeepSeek-V3.2': '#F39B7F',
    'Qwen3-235B-A22B-Thinking-2507': '#3C5488',
    'GLM-4.6': '#91D1C2',
    'Kimi-K2-Instruct-0905': '#925E9F',
    'MiniMax-M2': '#8491B4',
    'Ministral-3-14B-Instruct-2512': '#7E6148',
    'Llama-3.1-405B-Instruct': '#B09C85'
}
|
| 68 |
+
|
| 69 |
+
# Style Configuration: register Arial (regular + bold) with matplotlib so the
# whole figure uses it; fall back to the default family if the fonts are
# missing on this machine.
font_path = '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf'
font_bold_path = '/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'
try:
    fm.fontManager.addfont(font_path)
    fm.fontManager.addfont(font_bold_path)
    plt.rcParams['font.family'] = 'Arial'
except Exception:  # was a bare except; don't swallow KeyboardInterrupt/SystemExit
    print("Warning: Arial font not found, using default.")

plt.rcParams['font.weight'] = 'bold'
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titleweight'] = 'bold'
|
| 82 |
+
|
| 83 |
+
# Prepare for plotting: one polar subplot per model, 2 rows x 5 columns
# (matches the 10 models in ``data``).
models = list(data.keys())
categories = list(data[models[0]].keys())
N = len(categories)

# Compute angle for each axis, evenly spaced around the circle.
angles = [n / float(N) * 2 * pi for n in range(N)]
angles += angles[:1]  # Close the loop

# Create figure
fig, axs = plt.subplots(2, 5, figsize=(20, 9), subplot_kw=dict(polar=True))
axs = axs.flatten()

# Plot each model
for i, model in enumerate(models):
    ax = axs[i]
    values = [data[model][cat] for cat in categories]
    values += values[:1]  # Close the loop

    # Fall back to black if the model has no assigned colour.
    color = model_colors.get(model, 'black')

    # Plot average performance as a dashed black reference polygon.
    avg_values = [average_data[cat] for cat in categories]
    avg_values += avg_values[:1]
    ax.plot(angles, avg_values, color='black', linewidth=1.5, linestyle='--', alpha=0.55)

    # Add an "Avg." label between the first two axes, just inside the line.
    ax.text((angles[0] + angles[1])/2, (avg_values[0] + avg_values[1])/2 - 12, "Avg.",
            color='black', fontsize=8, fontweight='bold', ha='center', va='center', alpha=0.55)

    # Draw the model's outline
    ax.plot(angles, values, color=color, linewidth=2, linestyle='solid')

    # Fill the enclosed area
    ax.fill(angles, values, color=color, alpha=0.25)

    # Set y-axis limit (115 leaves headroom for the value labels below).
    ax.set_ylim(0, 115)

    # Add per-axis score values just outside each vertex.
    for angle, v in zip(angles[:-1], values[:-1]):
        ax.text(angle, v + 12, f"{v:.2f}", ha='center', va='center', fontsize=8.5, fontweight='bold', color=color)

    # Fix axis to be upright: T1 at the top, tasks clockwise.
    ax.set_theta_offset(pi / 2)
    ax.set_theta_direction(-1)

    # Draw axis labels
    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(categories, fontsize=10, fontweight='bold')

    # Keep basic radial grid labels but de-emphasise them.
    ax.set_rlabel_position(0)
    plt.setp(ax.get_yticklabels(), fontsize=9, color="grey", fontweight='bold')

    # Add title in the model's own colour.
    ax.set_title(model, size=13, color=color, weight='bold', pad=25)

    # Customize grid
    ax.grid(color='lightgrey', linestyle='--', linewidth=0.5)

    # Remove the outer polar spine.
    ax.spines['polar'].set_visible(False)

# Adjust layout
plt.tight_layout()

# Save
save_path = '/cpfs/user/chenziyang/LongBenchmark/draw/images/task_radar.png'
plt.savefig(save_path, dpi=300, bbox_inches='tight')
print(f"Radar chart saved to {save_path}")
|
draw/tools/text_source_compare.py
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import numpy as np
|
| 3 |
+
import matplotlib.pyplot as plt
|
| 4 |
+
from scipy.stats import norm
|
| 5 |
+
from scipy.optimize import curve_fit
|
| 6 |
+
from scipy.interpolate import interp1d, UnivariateSpline
|
| 7 |
+
|
| 8 |
+
# Set font for better appearance
|
| 9 |
+
# Lato is a modern, professional sans-serif font with excellent readability
|
| 10 |
+
plt.rcParams['font.family'] = 'sans-serif'
|
| 11 |
+
plt.rcParams['font.sans-serif'] = ['Lato', 'DejaVu Sans', 'Arial', 'Helvetica', 'Verdana', 'Liberation Sans']
|
| 12 |
+
plt.rcParams['mathtext.fontset'] = 'dejavusans'
|
| 13 |
+
plt.rcParams['axes.unicode_minus'] = False
|
| 14 |
+
plt.rcParams['font.weight'] = 'medium'
|
| 15 |
+
plt.rcParams['axes.labelweight'] = 'bold'
|
| 16 |
+
plt.rcParams['axes.titleweight'] = 'bold'
|
| 17 |
+
|
| 18 |
+
def gaussian_func(x, amplitude, mean, std):
    """Unnormalised Gaussian: ``amplitude * exp(-(x - mean)^2 / (2 * std^2))``.

    Shaped for use with ``scipy.optimize.curve_fit``.
    """
    z = (x - mean) / std
    return amplitude * np.exp(-0.5 * z ** 2)
|
| 23 |
+
|
| 24 |
+
def draw_distribution_histogram(data1, data2, dimensions=None, figsize=(12, 6),
                                color1='red', color2='blue',
                                label1='Distribution 1', label2='Distribution 2',
                                save_path=None):
    """
    Draw a smoothed line/area comparison of two distributions.

    Parameters:
    -----------
    data1 : array-like
        First distribution data (length should match number of dimensions)
    data2 : array-like
        Second distribution data (length should match number of dimensions)
    dimensions : list or None
        Dimension labels (e.g., ['Dim1', 'Dim2', ...]). If None, uses numeric labels.
    figsize : tuple
        Figure size
    color1 : str
        Color for first distribution
    color2 : str
        Color for second distribution
    label1 : str
        Label for first distribution
    label2 : str
        Label for second distribution
    save_path : str or None
        Path to save figure (None to not save)
    """
    # Convert to numpy arrays
    data1 = np.array(data1)
    data2 = np.array(data2)

    # Number of x positions is taken from the first series.
    n_dim = len(data1)

    # Create dimension labels when none are supplied.
    if dimensions is None:
        dimensions = [f'Dimension {i+1}' for i in range(n_dim)]

    # Create x positions
    x = np.arange(n_dim)

    # Helper for smooth curves through the data points.
    def smooth_interpolation(x_data, y_data, x_smooth):
        """Quadratic interpolation with graceful fallback to linear/spline."""
        try:
            f_quad = interp1d(x_data, y_data, kind='quadratic',
                              bounds_error=False, fill_value='extrapolate')
            result = f_quad(x_smooth)
        except ValueError:
            # Quadratic needs >= 3 points (was a bare except; narrowed to
            # interp1d's documented failure mode).
            try:
                f_linear = interp1d(x_data, y_data, kind='linear',
                                    bounds_error=False, fill_value='extrapolate')
                result = f_linear(x_smooth)
            except ValueError:
                # Last resort: low-order spline.
                spline = UnivariateSpline(x_data, y_data, s=0, k=min(2, len(x_data)-1))
                result = spline(x_smooth)

        # Pin the curve exactly to the original data points.
        for i, x_val in enumerate(x_data):
            idx = np.argmin(np.abs(x_smooth - x_val))
            result[idx] = y_data[i]

        return result

    # Generate smooth curves on a dense x grid.
    x_smooth = np.linspace(0, n_dim-1, 200)
    data1_smooth = smooth_interpolation(x, data1, x_smooth)
    data2_smooth = smooth_interpolation(x, data2, x_smooth)

    fig, ax = plt.subplots(figsize=figsize)

    # Set x-axis limits to make plot more compact.
    ax.set_xlim([-0.5, n_dim - 0.5])

    # Dashed "halo" curves drawn first (behind everything else).
    ax.plot(x_smooth, data1_smooth, color=color1, linewidth=3, alpha=0.4,
            linestyle='--', zorder=1)
    ax.plot(x_smooth, data2_smooth, color=color2, linewidth=3, alpha=0.4,
            linestyle='--', zorder=1)

    # Translucent fills from the baseline up to each curve.
    ax.fill_between(x_smooth, 0, data1_smooth, color=color1, alpha=0.15, zorder=2)
    ax.fill_between(x_smooth, 0, data2_smooth, color=color2, alpha=0.15, zorder=2)

    # Solid curves carry the legend entries.
    ax.plot(x_smooth, data1_smooth, color=color1, linewidth=3,
            label=label1, zorder=3)
    ax.plot(x_smooth, data2_smooth, color=color2, linewidth=3,
            label=label2, zorder=3)

    # Actual data points: circles for data1, squares for data2.
    ax.scatter(x, data1, color=color1, s=100, zorder=4, edgecolors='white',
               linewidth=2)
    ax.scatter(x, data2, color=color2, s=100, zorder=4, edgecolors='white',
               linewidth=2, marker='s')

    # Value labels: data1 below its point, data2 above its point.
    for i in range(len(x)):
        ax.annotate(str(data1[i]), xy=(x[i], data1[i]),
                    xytext=(0, -10), textcoords='offset points',
                    ha='center', va='top', fontsize=9, fontweight='medium',
                    color=color1, fontfamily='sans-serif')
        ax.annotate(str(data2[i]), xy=(x[i], data2[i]),
                    xytext=(0, 10), textcoords='offset points',
                    ha='center', va='bottom', fontsize=9, fontweight='medium',
                    color=color2, fontfamily='sans-serif')

    # Axis labels and tick styling.
    ax.set_xlabel('Text Type', fontsize=14, fontweight='bold', fontfamily='sans-serif')
    ax.set_ylabel('Sample Count', fontsize=14, fontweight='bold', fontfamily='sans-serif')
    ax.set_xticks(x)
    # Multi-word category names wrap onto several lines.
    xticklabels = [label.replace(' ', '\n') for label in dimensions]
    ax.set_xticklabels(xticklabels, rotation=0, ha='center', fontsize=10, fontweight='medium', fontfamily='sans-serif')
    # y range: slightly below zero for breathing room, top rounded up to the
    # next multiple of 50.
    max_val = max(max(data1), max(data2))
    y_max = (max_val // 50 + 1) * 50
    ax.set_ylim(-28, y_max)

    # Ticks every 50 starting at 0.
    yticks = np.arange(0, y_max + 50, 50)
    ax.set_yticks(yticks)
    ax.set_yticklabels([str(int(tick)) for tick in yticks], fontsize=11, fontweight='medium', fontfamily='sans-serif')
    ax.legend(fontsize=12, loc='best', frameon=True, fancybox=True,
              shadow=True, framealpha=0.9, prop={'weight': 'medium', 'family': 'sans-serif'})

    # Grid styling
    ax.grid(True, alpha=0.3, axis='both', linestyle='--', linewidth=0.5)
    ax.set_axisbelow(True)

    # Add subtle background color
    ax.set_facecolor('#fbfbfb')

    # Remove all spines (no black axes).
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['bottom'].set_visible(False)

    plt.tight_layout(pad=0.2)

    # Fix: honour the documented contract that save_path=None skips saving
    # (the original called plt.savefig(None, ...) unconditionally).
    if save_path is not None:
        plt.savefig(save_path, dpi=300, bbox_inches='tight', pad_inches=0.05)
        print(f"Figure saved to: {save_path}")
|
| 174 |
+
|
| 175 |
+
def main(data1, data2, save_path):
    """Render a grouped bar chart comparing two category-count distributions.

    Args:
        data1: Mapping of category name -> sample count for the first
            dataset (plotted as 'LongBench v2').
        data2: Mapping of category name -> sample count for the second
            dataset (plotted as 'LongBench Pro (Ours)'). Expected to share
            its key order with ``data1`` so bars line up per category.
        save_path: Destination file path for the rendered figure.
    """
    # Category labels come from data1's key order; the caller is
    # responsible for aligning both dicts (same keys, same order).
    dimensions = list(data1.keys())
    # Use fresh names instead of rebinding the parameters, so the dict
    # arguments remain inspectable throughout the function.
    counts1 = list(data1.values())
    counts2 = list(data2.values())

    draw_distribution_histogram(
        data1=counts1,
        data2=counts2,
        dimensions=dimensions,
        figsize=(12, 6),
        color1='#88BDBC',
        color2='#254E70',
        label1='LongBench v2',
        label2='LongBench Pro (Ours)',
        save_path=save_path
    )
|
| 195 |
+
|
| 196 |
+
if __name__ == '__main__':
    # Sample counts per text-source category for LongBench v2.
    data1_dict = {
        "Literature (Novel)": 72,
        "Literature (History)": 0,
        "Literature (Other)": 0,
        "News": 23,
        "Science": 94,
        "Technology": 90,
        "Medicine": 0,
        "Law": 33,
        "Policy": 41,
        "Education": 20,
        "Finance": 37,
        "Social": 39,
        "Structured": 33,
        "Other": 21
    }

    # Sample counts per text-source category for LongBench Pro.
    data2_dict = {
        "Literature (Novel)": 320,
        "Literature (Other)": 181,
        "Law": 188,
        "Literature (History)": 97,
        "Social": 122,
        "Finance": 113,
        "News": 69,
        "Education": 49,
        "Technology": 117,
        "Science": 110,
        "Medicine": 27,
        "Policy": 55,
        "Structured": 42,
        "Other": 23,
    }

    # Sort categories by the count difference (data2 - data1), descending,
    # so the categories with the largest gains come first. Use
    # .get(key, 0) so a category present in only one dict counts as 0
    # instead of raising KeyError.
    diff_dict = {key: data2_dict[key] - data1_dict.get(key, 0) for key in data2_dict.keys()}
    diff_dict = sorted(diff_dict.items(), key=lambda x: x[1], reverse=True)
    data1 = {key: data1_dict.get(key, 0) for key, _ in diff_dict}
    data2 = {key: data2_dict[key] for key, _ in diff_dict}

    output_dir = '/cpfs/user/chenziyang/LongBenchmark/draw/images'
    save_path = os.path.join(output_dir, 'data_source_distribution.png')

    main(data1, data2, save_path)
|
eval/model/Qwen3-Embedding-8B/1
ADDED
|
File without changes
|
eval/model/Tokenizers/claude/tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/model/Tokenizers/claude/tokenizer_config.json
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"add_prefix_space": false,
|
| 3 |
+
"bos_token": "<EOT>",
|
| 4 |
+
"clean_up_tokenization_spaces": true,
|
| 5 |
+
"eos_token": "<EOT>",
|
| 6 |
+
"model_max_length": 200000,
|
| 7 |
+
"tokenizer_class": "GPT2TokenizerFast",
|
| 8 |
+
"unk_token": "<EOT>"
|
| 9 |
+
}
|
eval/model/Tokenizers/gemini/tokenizer_config.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/model/Tokenizers/gpt/tokenizer_config.json
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"added_tokens_decoder": {
|
| 3 |
+
"199998": {
|
| 4 |
+
"content": "<|startoftext|>",
|
| 5 |
+
"lstrip": false,
|
| 6 |
+
"normalized": false,
|
| 7 |
+
"rstrip": false,
|
| 8 |
+
"single_word": false,
|
| 9 |
+
"special": true
|
| 10 |
+
},
|
| 11 |
+
"199999": {
|
| 12 |
+
"content": "<|endoftext|>",
|
| 13 |
+
"lstrip": false,
|
| 14 |
+
"normalized": false,
|
| 15 |
+
"rstrip": false,
|
| 16 |
+
"single_word": false,
|
| 17 |
+
"special": true
|
| 18 |
+
},
|
| 19 |
+
"200000": {
|
| 20 |
+
"content": "<|reserved_200000|>",
|
| 21 |
+
"lstrip": false,
|
| 22 |
+
"normalized": false,
|
| 23 |
+
"rstrip": false,
|
| 24 |
+
"single_word": false,
|
| 25 |
+
"special": true
|
| 26 |
+
},
|
| 27 |
+
"200001": {
|
| 28 |
+
"content": "<|reserved_200001|>",
|
| 29 |
+
"lstrip": false,
|
| 30 |
+
"normalized": false,
|
| 31 |
+
"rstrip": false,
|
| 32 |
+
"single_word": false,
|
| 33 |
+
"special": true
|
| 34 |
+
},
|
| 35 |
+
"200002": {
|
| 36 |
+
"content": "<|return|>",
|
| 37 |
+
"lstrip": false,
|
| 38 |
+
"normalized": false,
|
| 39 |
+
"rstrip": false,
|
| 40 |
+
"single_word": false,
|
| 41 |
+
"special": true
|
| 42 |
+
},
|
| 43 |
+
"200003": {
|
| 44 |
+
"content": "<|constrain|>",
|
| 45 |
+
"lstrip": false,
|
| 46 |
+
"normalized": false,
|
| 47 |
+
"rstrip": false,
|
| 48 |
+
"single_word": false,
|
| 49 |
+
"special": true
|
| 50 |
+
},
|
| 51 |
+
"200004": {
|
| 52 |
+
"content": "<|reserved_200004|>",
|
| 53 |
+
"lstrip": false,
|
| 54 |
+
"normalized": false,
|
| 55 |
+
"rstrip": false,
|
| 56 |
+
"single_word": false,
|
| 57 |
+
"special": true
|
| 58 |
+
},
|
| 59 |
+
"200005": {
|
| 60 |
+
"content": "<|channel|>",
|
| 61 |
+
"lstrip": false,
|
| 62 |
+
"normalized": false,
|
| 63 |
+
"rstrip": false,
|
| 64 |
+
"single_word": false,
|
| 65 |
+
"special": true
|
| 66 |
+
},
|
| 67 |
+
"200006": {
|
| 68 |
+
"content": "<|start|>",
|
| 69 |
+
"lstrip": false,
|
| 70 |
+
"normalized": false,
|
| 71 |
+
"rstrip": false,
|
| 72 |
+
"single_word": false,
|
| 73 |
+
"special": true
|
| 74 |
+
},
|
| 75 |
+
"200007": {
|
| 76 |
+
"content": "<|end|>",
|
| 77 |
+
"lstrip": false,
|
| 78 |
+
"normalized": false,
|
| 79 |
+
"rstrip": false,
|
| 80 |
+
"single_word": false,
|
| 81 |
+
"special": true
|
| 82 |
+
},
|
| 83 |
+
"200008": {
|
| 84 |
+
"content": "<|message|>",
|
| 85 |
+
"lstrip": false,
|
| 86 |
+
"normalized": false,
|
| 87 |
+
"rstrip": false,
|
| 88 |
+
"single_word": false,
|
| 89 |
+
"special": true
|
| 90 |
+
},
|
| 91 |
+
"200009": {
|
| 92 |
+
"content": "<|reserved_200009|>",
|
| 93 |
+
"lstrip": false,
|
| 94 |
+
"normalized": false,
|
| 95 |
+
"rstrip": false,
|
| 96 |
+
"single_word": false,
|
| 97 |
+
"special": true
|
| 98 |
+
},
|
| 99 |
+
"200010": {
|
| 100 |
+
"content": "<|reserved_200010|>",
|
| 101 |
+
"lstrip": false,
|
| 102 |
+
"normalized": false,
|
| 103 |
+
"rstrip": false,
|
| 104 |
+
"single_word": false,
|
| 105 |
+
"special": true
|
| 106 |
+
},
|
| 107 |
+
"200011": {
|
| 108 |
+
"content": "<|reserved_200011|>",
|
| 109 |
+
"lstrip": false,
|
| 110 |
+
"normalized": false,
|
| 111 |
+
"rstrip": false,
|
| 112 |
+
"single_word": false,
|
| 113 |
+
"special": true
|
| 114 |
+
},
|
| 115 |
+
"200012": {
|
| 116 |
+
"content": "<|call|>",
|
| 117 |
+
"lstrip": false,
|
| 118 |
+
"normalized": false,
|
| 119 |
+
"rstrip": false,
|
| 120 |
+
"single_word": false,
|
| 121 |
+
"special": true
|
| 122 |
+
},
|
| 123 |
+
"200013": {
|
| 124 |
+
"content": "<|reserved_200013|>",
|
| 125 |
+
"lstrip": false,
|
| 126 |
+
"normalized": false,
|
| 127 |
+
"rstrip": false,
|
| 128 |
+
"single_word": false,
|
| 129 |
+
"special": true
|
| 130 |
+
},
|
| 131 |
+
"200014": {
|
| 132 |
+
"content": "<|reserved_200014|>",
|
| 133 |
+
"lstrip": false,
|
| 134 |
+
"normalized": false,
|
| 135 |
+
"rstrip": false,
|
| 136 |
+
"single_word": false,
|
| 137 |
+
"special": true
|
| 138 |
+
},
|
| 139 |
+
"200015": {
|
| 140 |
+
"content": "<|reserved_200015|>",
|
| 141 |
+
"lstrip": false,
|
| 142 |
+
"normalized": false,
|
| 143 |
+
"rstrip": false,
|
| 144 |
+
"single_word": false,
|
| 145 |
+
"special": true
|
| 146 |
+
},
|
| 147 |
+
"200016": {
|
| 148 |
+
"content": "<|reserved_200016|>",
|
| 149 |
+
"lstrip": false,
|
| 150 |
+
"normalized": false,
|
| 151 |
+
"rstrip": false,
|
| 152 |
+
"single_word": false,
|
| 153 |
+
"special": true
|
| 154 |
+
},
|
| 155 |
+
"200017": {
|
| 156 |
+
"content": "<|reserved_200017|>",
|
| 157 |
+
"lstrip": false,
|
| 158 |
+
"normalized": false,
|
| 159 |
+
"rstrip": false,
|
| 160 |
+
"single_word": false,
|
| 161 |
+
"special": true
|
| 162 |
+
},
|
| 163 |
+
"200018": {
|
| 164 |
+
"content": "<|endofprompt|>",
|
| 165 |
+
"lstrip": false,
|
| 166 |
+
"normalized": false,
|
| 167 |
+
"rstrip": false,
|
| 168 |
+
"single_word": false,
|
| 169 |
+
"special": true
|
| 170 |
+
}
|
| 171 |
+
},
|
| 172 |
+
"bos_token": "<|startoftext|>",
|
| 173 |
+
"clean_up_tokenization_spaces": false,
|
| 174 |
+
"eos_token": "<|return|>",
|
| 175 |
+
"extra_special_tokens": {},
|
| 176 |
+
"model_input_names": [
|
| 177 |
+
"input_ids",
|
| 178 |
+
"attention_mask"
|
| 179 |
+
],
|
| 180 |
+
"model_max_length": 1000000000000000019884624838656,
|
| 181 |
+
"pad_token": "<|endoftext|>",
|
| 182 |
+
"tokenizer_class": "PreTrainedTokenizerFast"
|
| 183 |
+
}
|
eval/model/Tokenizers/qwen/tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/model/Tokenizers/qwen/tokenizer_config.json
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"add_bos_token": false,
|
| 3 |
+
"add_prefix_space": false,
|
| 4 |
+
"added_tokens_decoder": {
|
| 5 |
+
"151643": {
|
| 6 |
+
"content": "<|endoftext|>",
|
| 7 |
+
"lstrip": false,
|
| 8 |
+
"normalized": false,
|
| 9 |
+
"rstrip": false,
|
| 10 |
+
"single_word": false,
|
| 11 |
+
"special": true
|
| 12 |
+
},
|
| 13 |
+
"151644": {
|
| 14 |
+
"content": "<|im_start|>",
|
| 15 |
+
"lstrip": false,
|
| 16 |
+
"normalized": false,
|
| 17 |
+
"rstrip": false,
|
| 18 |
+
"single_word": false,
|
| 19 |
+
"special": true
|
| 20 |
+
},
|
| 21 |
+
"151645": {
|
| 22 |
+
"content": "<|im_end|>",
|
| 23 |
+
"lstrip": false,
|
| 24 |
+
"normalized": false,
|
| 25 |
+
"rstrip": false,
|
| 26 |
+
"single_word": false,
|
| 27 |
+
"special": true
|
| 28 |
+
},
|
| 29 |
+
"151646": {
|
| 30 |
+
"content": "<|object_ref_start|>",
|
| 31 |
+
"lstrip": false,
|
| 32 |
+
"normalized": false,
|
| 33 |
+
"rstrip": false,
|
| 34 |
+
"single_word": false,
|
| 35 |
+
"special": true
|
| 36 |
+
},
|
| 37 |
+
"151647": {
|
| 38 |
+
"content": "<|object_ref_end|>",
|
| 39 |
+
"lstrip": false,
|
| 40 |
+
"normalized": false,
|
| 41 |
+
"rstrip": false,
|
| 42 |
+
"single_word": false,
|
| 43 |
+
"special": true
|
| 44 |
+
},
|
| 45 |
+
"151648": {
|
| 46 |
+
"content": "<|box_start|>",
|
| 47 |
+
"lstrip": false,
|
| 48 |
+
"normalized": false,
|
| 49 |
+
"rstrip": false,
|
| 50 |
+
"single_word": false,
|
| 51 |
+
"special": true
|
| 52 |
+
},
|
| 53 |
+
"151649": {
|
| 54 |
+
"content": "<|box_end|>",
|
| 55 |
+
"lstrip": false,
|
| 56 |
+
"normalized": false,
|
| 57 |
+
"rstrip": false,
|
| 58 |
+
"single_word": false,
|
| 59 |
+
"special": true
|
| 60 |
+
},
|
| 61 |
+
"151650": {
|
| 62 |
+
"content": "<|quad_start|>",
|
| 63 |
+
"lstrip": false,
|
| 64 |
+
"normalized": false,
|
| 65 |
+
"rstrip": false,
|
| 66 |
+
"single_word": false,
|
| 67 |
+
"special": true
|
| 68 |
+
},
|
| 69 |
+
"151651": {
|
| 70 |
+
"content": "<|quad_end|>",
|
| 71 |
+
"lstrip": false,
|
| 72 |
+
"normalized": false,
|
| 73 |
+
"rstrip": false,
|
| 74 |
+
"single_word": false,
|
| 75 |
+
"special": true
|
| 76 |
+
},
|
| 77 |
+
"151652": {
|
| 78 |
+
"content": "<|vision_start|>",
|
| 79 |
+
"lstrip": false,
|
| 80 |
+
"normalized": false,
|
| 81 |
+
"rstrip": false,
|
| 82 |
+
"single_word": false,
|
| 83 |
+
"special": true
|
| 84 |
+
},
|
| 85 |
+
"151653": {
|
| 86 |
+
"content": "<|vision_end|>",
|
| 87 |
+
"lstrip": false,
|
| 88 |
+
"normalized": false,
|
| 89 |
+
"rstrip": false,
|
| 90 |
+
"single_word": false,
|
| 91 |
+
"special": true
|
| 92 |
+
},
|
| 93 |
+
"151654": {
|
| 94 |
+
"content": "<|vision_pad|>",
|
| 95 |
+
"lstrip": false,
|
| 96 |
+
"normalized": false,
|
| 97 |
+
"rstrip": false,
|
| 98 |
+
"single_word": false,
|
| 99 |
+
"special": true
|
| 100 |
+
},
|
| 101 |
+
"151655": {
|
| 102 |
+
"content": "<|image_pad|>",
|
| 103 |
+
"lstrip": false,
|
| 104 |
+
"normalized": false,
|
| 105 |
+
"rstrip": false,
|
| 106 |
+
"single_word": false,
|
| 107 |
+
"special": true
|
| 108 |
+
},
|
| 109 |
+
"151656": {
|
| 110 |
+
"content": "<|video_pad|>",
|
| 111 |
+
"lstrip": false,
|
| 112 |
+
"normalized": false,
|
| 113 |
+
"rstrip": false,
|
| 114 |
+
"single_word": false,
|
| 115 |
+
"special": true
|
| 116 |
+
},
|
| 117 |
+
"151657": {
|
| 118 |
+
"content": "<tool_call>",
|
| 119 |
+
"lstrip": false,
|
| 120 |
+
"normalized": false,
|
| 121 |
+
"rstrip": false,
|
| 122 |
+
"single_word": false,
|
| 123 |
+
"special": false
|
| 124 |
+
},
|
| 125 |
+
"151658": {
|
| 126 |
+
"content": "</tool_call>",
|
| 127 |
+
"lstrip": false,
|
| 128 |
+
"normalized": false,
|
| 129 |
+
"rstrip": false,
|
| 130 |
+
"single_word": false,
|
| 131 |
+
"special": false
|
| 132 |
+
},
|
| 133 |
+
"151659": {
|
| 134 |
+
"content": "<|fim_prefix|>",
|
| 135 |
+
"lstrip": false,
|
| 136 |
+
"normalized": false,
|
| 137 |
+
"rstrip": false,
|
| 138 |
+
"single_word": false,
|
| 139 |
+
"special": false
|
| 140 |
+
},
|
| 141 |
+
"151660": {
|
| 142 |
+
"content": "<|fim_middle|>",
|
| 143 |
+
"lstrip": false,
|
| 144 |
+
"normalized": false,
|
| 145 |
+
"rstrip": false,
|
| 146 |
+
"single_word": false,
|
| 147 |
+
"special": false
|
| 148 |
+
},
|
| 149 |
+
"151661": {
|
| 150 |
+
"content": "<|fim_suffix|>",
|
| 151 |
+
"lstrip": false,
|
| 152 |
+
"normalized": false,
|
| 153 |
+
"rstrip": false,
|
| 154 |
+
"single_word": false,
|
| 155 |
+
"special": false
|
| 156 |
+
},
|
| 157 |
+
"151662": {
|
| 158 |
+
"content": "<|fim_pad|>",
|
| 159 |
+
"lstrip": false,
|
| 160 |
+
"normalized": false,
|
| 161 |
+
"rstrip": false,
|
| 162 |
+
"single_word": false,
|
| 163 |
+
"special": false
|
| 164 |
+
},
|
| 165 |
+
"151663": {
|
| 166 |
+
"content": "<|repo_name|>",
|
| 167 |
+
"lstrip": false,
|
| 168 |
+
"normalized": false,
|
| 169 |
+
"rstrip": false,
|
| 170 |
+
"single_word": false,
|
| 171 |
+
"special": false
|
| 172 |
+
},
|
| 173 |
+
"151664": {
|
| 174 |
+
"content": "<|file_sep|>",
|
| 175 |
+
"lstrip": false,
|
| 176 |
+
"normalized": false,
|
| 177 |
+
"rstrip": false,
|
| 178 |
+
"single_word": false,
|
| 179 |
+
"special": false
|
| 180 |
+
}
|
| 181 |
+
},
|
| 182 |
+
"additional_special_tokens": [
|
| 183 |
+
"<|im_start|>",
|
| 184 |
+
"<|im_end|>",
|
| 185 |
+
"<|object_ref_start|>",
|
| 186 |
+
"<|object_ref_end|>",
|
| 187 |
+
"<|box_start|>",
|
| 188 |
+
"<|box_end|>",
|
| 189 |
+
"<|quad_start|>",
|
| 190 |
+
"<|quad_end|>",
|
| 191 |
+
"<|vision_start|>",
|
| 192 |
+
"<|vision_end|>",
|
| 193 |
+
"<|vision_pad|>",
|
| 194 |
+
"<|image_pad|>",
|
| 195 |
+
"<|video_pad|>"
|
| 196 |
+
],
|
| 197 |
+
"bos_token": null,
|
| 198 |
+
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
|
| 199 |
+
"clean_up_tokenization_spaces": false,
|
| 200 |
+
"eos_token": "<|im_end|>",
|
| 201 |
+
"errors": "replace",
|
| 202 |
+
"model_max_length": 131072,
|
| 203 |
+
"pad_token": "<|endoftext|>",
|
| 204 |
+
"split_special_tokens": false,
|
| 205 |
+
"tokenizer_class": "Qwen2Tokenizer",
|
| 206 |
+
"unk_token": null
|
| 207 |
+
}
|
eval/modules/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .data_loader import DataLoader
|
| 2 |
+
from .model_manager import ModelManagerOpenAI
|
| 3 |
+
from .inference import InferenceEngine
|
| 4 |
+
from .evaluation import Evaluator
|
| 5 |
+
|
| 6 |
+
__all__ = ['DataLoader', 'ModelManagerOpenAI', 'InferenceEngine', 'Evaluator']
|
eval/modules/data_loader.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import logging
|
| 4 |
+
from datasets import Dataset, load_dataset
|
| 5 |
+
from typing import List, Dict, Set, Any, Tuple
|
| 6 |
+
|
| 7 |
+
# Configure logging
|
| 8 |
+
logging.basicConfig(
|
| 9 |
+
level=logging.INFO,
|
| 10 |
+
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
| 11 |
+
)
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
class DataLoader:
    """Loads the benchmark dataset, expands it for best-of-N (BoN) sampling,
    shuffles and shards it for distributed inference, and manages the cached
    inference-results JSONL file."""

    def __init__(self, dataset_path: str = 'dataset/longbench_pro.json', bon_num: int = 1, total_shards: int = 1, shard_id: int = 1, seed: int = 42) -> None:
        """
        Args:
            dataset_path: Path to the source dataset (a JSON list of samples).
            bon_num: Number of BoN repetitions of each sample.
            total_shards: Total number of shards for distributed inference.
            shard_id: 1-based index of the shard served by this process.
            seed: Shuffle seed; all shards must use the same seed so the
                permutation (and therefore the shard split) is consistent.
        """
        self.logger: logging.Logger = logging.getLogger(f"{__name__}.DataLoader")
        self.inference_dataset, self.inference_samples_num, self.evaluation_samples_num = self.load_dataset_with_bon_shard(dataset_path, bon_num, total_shards, shard_id, seed)

    def load_dataset_with_bon_shard(self, dataset_path: str, bon_num: int = 3, total_shards: int = 1, shard_id: int = 1, seed: int = 42) -> Tuple[List[Dict[str, Any]], int, int]:
        """Load the dataset, repeat it ``bon_num`` times, then shuffle and shard.

        Returns:
            Tuple of (this shard's samples, this shard's sample count,
            total BoN sample count across all shards).

        Raises:
            FileNotFoundError: If ``dataset_path`` does not exist.
            json.JSONDecodeError: If the file is not valid JSON.
        """
        self.logger.info("Starting to load dataset")
        try:
            # TODO: load from HF
            # Context manager closes the file promptly; the original
            # json.load(open(...)) leaked the file handle.
            with open(dataset_path, 'r', encoding='utf-8') as f:
                original_dataset: List[Dict[str, Any]] = json.load(f)
            # self.dataset = load_dataset('', split='train')
            bon_dataset: List[Dict[str, Any]] = []
            # Repeat the dataset bon_num times, to get the best of bon_num results.
            for i in range(bon_num):
                for item in original_dataset:
                    tmp_data = {
                        "bon_idx": i + 1,  # use for filtering data
                        "id": item["id"],
                        "context": item["context"],
                        "language": item["language"],
                        "token_length": item["token_length"],
                        "primary_task": item["primary_task"],
                        "secondary_task": item["secondary_task"],
                        "contextual_requirement": item["contextual_requirement"],
                        "question_nonthinking": item["question_nonthinking"],
                        "question_thinking": item["question_thinking"],
                        "answer": item["answer"],
                        "difficulty": item["difficulty"]
                    }
                    bon_dataset.append(tmp_data)
            # Shuffle and shard the dataset. `shard_id` is 1-based at the
            # API boundary, but Dataset.shard expects a 0-based index.
            shuffle_dataset: Dataset = Dataset.from_list(bon_dataset)
            shuffle_dataset = shuffle_dataset.shuffle(seed=seed)
            shard_dataset = shuffle_dataset.shard(
                num_shards=total_shards, index=shard_id - 1
            )
            inference_dataset: List[Dict[str, Any]] = shard_dataset.to_list()
            inference_samples_num: int = len(inference_dataset)
            evaluation_samples_num: int = len(original_dataset) * bon_num
            self.logger.info("Dataset loaded successfully!")
            self.logger.info(f"Number of inference samples: [Original-{len(original_dataset)} * BoN-{bon_num} / Shard-{total_shards}] = {inference_samples_num}")
            self.logger.info(f"Number of evaluation samples: [Original-{len(original_dataset)} * BoN-{bon_num}] = {evaluation_samples_num}")
        except FileNotFoundError:
            self.logger.error(f"Dataset file not found: {dataset_path}")
            raise
        except json.JSONDecodeError as e:
            self.logger.error(f"Dataset file format error: {e}")
            raise
        except Exception as e:
            self.logger.error(f"Unknown error occurred while loading dataset: {e}")
            raise
        return inference_dataset, inference_samples_num, evaluation_samples_num

    def get_cached_data(self, output_file: str) -> Dict[str, Any]:
        """Read cached inference results, keyed by "<id>-<bon_idx>".

        Lines with empty predictions or invalid JSON are skipped, so the
        corresponding samples will be re-run later.
        """
        has_data: Dict[str, Any] = {}
        if os.path.exists(output_file):
            self.logger.info(f"Found cached file: {output_file}")
            try:
                with open(output_file, encoding='utf-8') as f:
                    line_count: int = 0
                    empty_prediction_count: int = 0
                    for line in f:
                        line_count += 1
                        try:
                            item: Dict[str, Any] = json.loads(line)
                            # Skip missing/empty/whitespace-only predictions.
                            # (The original also tested `is None` after the
                            # falsy check, which was unreachable dead code.)
                            if not item.get("prediction") or item.get("prediction").strip() == "":
                                empty_prediction_count += 1
                                continue
                            has_data[f"{item['id']}-{item['bon_idx']}"] = item
                        except json.JSONDecodeError as e:
                            self.logger.warning(f"Line {line_count} JSON format error, skipping: {e}")
                            continue

                self.logger.info(f"Cached data processing completed - Total lines: {line_count}, Valid data: {len(has_data)}, Empty prediction: {empty_prediction_count}")
            except Exception as e:
                self.logger.error(f"Error occurred while reading cached file: {e}")
                raise
        else:
            self.logger.warning(f"Cached file does not exist: {output_file}")

        return has_data

    def save_has_data(self, has_data: Dict[str, Any], output_file: str) -> None:
        """Rewrite ``output_file`` with only the valid cached items, keeping
        the previous file as ``<output_file>.backup``."""
        try:
            backup_file: str = output_file + '.backup'
            if os.path.exists(output_file):
                os.rename(output_file, backup_file)
                self.logger.info(f"Backup file created: {backup_file}")
            else:
                self.logger.info(f"Output file does not exist, will create new file: {output_file}")

            with open(output_file, 'w', encoding='utf-8') as f:
                for item in has_data.values():
                    f.write(json.dumps(item, ensure_ascii=False) + '\n')
            self.logger.info(f"Valid data saved to: {output_file}, total {len(has_data)} items")
        except Exception as e:
            self.logger.error(f"Error occurred while saving data: {e}")
            raise

    def filter_new_data(self, output_file: str) -> List[Dict[str, Any]]:
        """Return shard samples that still lack a valid cached prediction,
        after compacting the cache file (dropping empty/corrupt lines)."""
        self.logger.info("Starting to filter data that needs processing")
        has_data: Dict[str, Any] = self.get_cached_data(output_file)
        self.save_has_data(has_data, output_file)
        data: List[Dict[str, Any]] = [
            item for item in self.inference_dataset
            if f"{item['id']}-{item['bon_idx']}" not in has_data
        ]
        self.logger.info(f"Data filtering completed - Data to process: {len(data)} items")
        return data

    def split_data_for_multi_process(self, data: List[Dict[str, Any]], n_proc: int) -> List[List[Dict[str, Any]]]:
        """Round-robin split of ``data`` into ``n_proc`` sublists."""
        data_subsets: List[List[Dict[str, Any]]] = [data[i::n_proc] for i in range(n_proc)]
        return data_subsets
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
if __name__ == "__main__":
    # Smoke test: load a sharded best-of-3 dataset and inspect one sample.
    data_loader = DataLoader(dataset_path='/cpfs/user/chenziyang/LongBenchmark/eval/dataset/final/longbench_pro_1104.json', bon_num=3, total_shards=2, shard_id=2, seed=42)
    print(len(data_loader.inference_dataset))
    # Fix: samples carry an "id" field (see load_dataset_with_bon_shard's
    # tmp_data keys); "context_id" is never set and raised KeyError.
    print(data_loader.inference_dataset[100]["id"])
|
| 141 |
+
|
eval/modules/evaluation.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import time
|
| 3 |
+
import logging
|
| 4 |
+
from tqdm import tqdm
|
| 5 |
+
from typing import List, Dict, Any, Optional, Tuple
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
from modules.utils import *
|
| 8 |
+
from sentence_transformers import SentenceTransformer
|
| 9 |
+
|
| 10 |
+
# Configure logging
|
| 11 |
+
logging.basicConfig(
|
| 12 |
+
level=logging.INFO,
|
| 13 |
+
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
| 14 |
+
)
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
# silence SentenceTransformer's logger
|
| 18 |
+
logging.getLogger("sentence_transformers").setLevel(logging.WARNING)
|
| 19 |
+
|
| 20 |
+
class Evaluator:
|
| 21 |
+
"""evaluator"""
|
| 22 |
+
|
| 23 |
+
    def __init__(self, evaluation_samples_num: int, embedding_model_path: str = "model/Qwen3-Embedding-8B") -> None:
        """Initialize the evaluator.

        Args:
            evaluation_samples_num: Expected total number of evaluation
                samples; used to check the length of the evaluation data.
            embedding_model_path: Local path of the sentence-embedding model
                used by the "Summary" metric.
        """
        # Embedding model used only for the "Summary" metric.
        # NOTE(review): left padding presumably matches the embedding
        # model's pooling requirements — confirm against the model card.
        self.embedding_model: SentenceTransformer = SentenceTransformer(
            embedding_model_path,
            tokenizer_kwargs={"padding_side": "left"},
        )
        self.evaluation_samples_num: int = evaluation_samples_num # use this value to check the length of evaluation data

        # Maps each secondary task to the metric name used to score it
        # (dispatched on in calculate_metric).
        self.task_metric_config: Dict[str, str] = {
            "T1.1 Global Cohesive Retrieval": "NDCG",
            "T1.2 Key-Snippet Retrieval": "NDCG",
            "T2.1 Global Timeline Reconstruction": "Pairwise_Accuracy",
            "T2.2 Local Causal Chain Sorting": "Pairwise_Accuracy",
            "T3.1 Multi-Doc Integration QA": "Accuracy",
            "T3.2 Single-Hop Fact QA": "Accuracy",
            "T4.1 Global-Coverage Constrained Summary": "Summary",
            "T4.2 Query-Focused Summary": "Summary",
            "T5.1 Full-Sentence Citation Alignment": "F1_Score",
            "T5.2 Key-Statement Citation Alignment": "F1_Score",
            "T6.1 Large-Scale Document Clustering": "SubEM",
            "T6.2 Targeted Subset Cluster Identification": "F1_Score",
            "T6.3 Global Frequency Analysis": "Pairwise_Accuracy",
            "T7.1 Global Conflict & Inconsistency Localization": "F1_Score",
            "T7.2 Targeted Rule or Condition Violation Detection": "F1_Score",
            "T7.3 Comprehensive Error & Anomaly Sweep": "F1_Score",
            "T8.1 Structured Multi-Source Consistency Verification": "SubEM",
            "T8.2 Single-Source Targeted Aggregation": "SubEM",
            "T8.3 Long-Context Procedural State Tracking": "SubEM",
            "T9.1 Dependency-Aware Multi-Version Impact Analysis": "F1_Score",
            "T9.2 Localized Interface Change Detection": "F1_Score",
            "T10.1 Large-Scale In-Context Rule Induction": "SubEM",
            "T10.2 Targeted Example-Based Rule Induction": "SubEM",
            "T11.1 Long-Range Entity & Commitment Tracking": "Accuracy",
            "T11.2 Short-Range Reference Resolution & State Query": "Accuracy"
        }

        # Axes along which aggregate scores are reported, with the ordered
        # bucket labels for each axis.
        self.evaluate_configs: Dict[str, List[str]] = {
            "token_length": ["8k", "16k", "32k", "64k", "128k", "256k"],
            "contextual_requirement": ["Full", "Partial"],
            "difficulty": ["Easy", "Moderate", "Hard", "Extreme"],
            "primary_task": [
                "T1. Retrieval & Ranking",
                "T2. Sequencing & Structure Reconstruction",
                "T3. Evidence-Grounded QA",
                "T4. Summarization & Synthesis",
                "T5. Attribution & Citation Alignment",
                "T6. Aggregation & Clustering",
                "T7. Consistency & Compliance Checking",
                "T8. Structured & Numeric Reasoning",
                "T9. Version & Code Diff Analysis",
                "T10. Rule Induction & In-Context Learning",
                "T11. Dialogue Memory & Long-Horizon Tracking"
            ],
            "language": ["Chinese", "English"]
        }
|
| 77 |
+
|
| 78 |
+
def load_jsonl_file(self, file_path: str) -> List[Any]:
    """Read one JSONL file and return its records as a list.

    Each line is parsed independently with ``json.loads``; a malformed
    line raises ``json.JSONDecodeError``.
    """
    with open(file_path, 'r', encoding='utf-8') as handle:
        return [json.loads(row) for row in handle]
|
| 84 |
+
|
| 85 |
+
def load_jsonl_files(self, file_paths: List[str]) -> List[Any]:
    """Concatenate the records of several JSONL files, preserving file order."""
    merged: List[Any] = []
    for path in file_paths:
        merged.extend(self.load_jsonl_file(path))
    return merged
|
| 91 |
+
|
| 92 |
+
def save_json_file(self, data: List[Any], file_path: str) -> None:
    """Write *data* to *file_path* as pretty-printed, non-ASCII-escaped JSON."""
    with open(file_path, 'w', encoding='utf-8') as sink:
        json.dump(data, sink, ensure_ascii=False, indent=4)
|
| 95 |
+
|
| 96 |
+
def calculate_metric(self, secondary_task: str, answer: List[str], prediction: str, is_zh: bool) -> Tuple[bool, float]:
    """
    Compute the configured metric for a single sample.

    Looks up the metric name for *secondary_task* in task_metric_config and
    dispatches to the matching scorer. Returns (success, value); any failure
    (empty prediction, unknown metric, scorer exception) yields (False, 0.0).
    """
    try:
        if prediction == "":
            return False, 0.0

        metric_name: str = self.task_metric_config[secondary_task]

        # scorers sharing the (answer, prediction) signature
        simple_scorers = {
            "NDCG": NDCG,
            "Pairwise_Accuracy": Pairwise_Accuracy,
            "Accuracy": Accuracy,
            "F1_Score": F1_Score,
            "SubEM": SubEM,
        }
        if metric_name in simple_scorers:
            metric_value = simple_scorers[metric_name](answer, prediction)
        elif metric_name == "Summary":
            # Summary additionally needs the embedding model and the language flag
            metric_value = Summary(self.embedding_model, answer, prediction, is_zh)
        else:
            logger.warning(f"Unknown metric: {metric_name}")
            return False, 0.0

        # validate metric value range: every scorer must be normalized to [0, 1]
        assert 0.0 <= metric_value <= 1.0, f"Metric {metric_value} is not in [0, 1]"
        return True, metric_value

    except Exception as e:
        logger.error(f"Error calculating metric for {secondary_task}: {e}")
        return False, 0.0
|
| 129 |
+
|
| 130 |
+
def evaluate_single_item(self, item: Dict[str, Any]) -> Tuple[bool, Dict[str, Any]]:
    """Score one inference record in place.

    Reads the record's task, language, gold answer and model prediction,
    computes the configured metric via calculate_metric, and stores the
    value under ``item["metric"]``.

    Returns:
        (success, item): success is False when the metric could not be
        computed (empty prediction, unknown metric, or scorer error).

    Note: the return annotation previously said ``Dict[str, Any]`` but the
    method has always returned a (bool, dict) tuple; the unused ``key_words``
    lookup was also dropped.
    """
    secondary_task: str = item["secondary_task"]
    is_zh: bool = item["language"] == "Chinese"
    answer: List[str] = item["answer"]
    prediction: str = item["prediction"]

    # calculate metric and annotate the record in place so callers can persist it
    success, metric_value = self.calculate_metric(secondary_task, answer, prediction, is_zh)
    item["metric"] = metric_value

    return success, item
|
| 143 |
+
|
| 144 |
+
def evaluate(self, inference_files_in_shard_group: List[str], evaluation_file: str) -> int:
    """Sequentially evaluate every inference record and append results to a file.

    Args:
        inference_files_in_shard_group: JSONL shard files produced by inference.
        evaluation_file: output JSONL path; it is truncated first, then every
            scored record is appended and flushed immediately so partial
            progress survives a crash.

    Returns:
        Number of samples whose inference or evaluation failed.
        (The annotation previously said ``None`` but the method has always
        returned this count, which metric_summary consumes.)
    """
    logger.info(f"load data from {inference_files_in_shard_group}")
    data: List[Any] = self.load_jsonl_files(inference_files_in_shard_group)

    # check data length
    # NOTE(review): assert is stripped under `python -O`; kept as-is because
    # callers may rely on AssertionError — consider raising ValueError instead.
    assert len(data) == self.evaluation_samples_num, f"inference datas num {len(data)} != evaluation samples num {self.evaluation_samples_num}, please infer again!"

    # clear output file
    with open(evaluation_file, 'w', encoding='utf-8') as f:
        pass

    # sequential evaluation
    fail_samples_num = 0  # number of samples with failed inference or evaluation
    logger.info(f"start sequential evaluation of {len(data)} items")
    for item in tqdm(data, desc="evaluation progress"):
        success, result = self.evaluate_single_item(item)
        if not success:
            fail_samples_num += 1
        # write result to file immediately (crash-safe incremental output)
        with open(evaluation_file, 'a', encoding='utf-8') as fout:
            fout.write(json.dumps(result, ensure_ascii=False) + '\n')
            fout.flush()

    logger.info(f"evaluation completed, results saved to: {evaluation_file}")
    if fail_samples_num > 0:
        logger.warning(f"there are {fail_samples_num} samples with failed inference or evaluation, please check them carefully!")
    return fail_samples_num
|
| 172 |
+
|
| 173 |
+
def metric_summary(self, evaluation_file: str, fail_samples_num: int, summary_file: str, bon_num: int = 3) -> None:
    """Aggregate per-sample metrics into a summary JSON file.

    Produces, in order: run metadata and counts, metrics averaged over all
    inference iterations, per-iteration overall metrics, per-dimension
    breakdowns, and BoN-n / pass@n metrics for every n up to *bon_num*.
    """
    logger.info(f"summary metrics from {evaluation_file}")
    data: List[Any] = self.load_jsonl_file(evaluation_file)

    summary_results: Dict[str, Any] = {'date': datetime.now().strftime("%Y-%m-%d")}

    # calculate average overall metrics for all inference iterations
    logger.info(f"calculate average overall metrics for all inference iterations")
    average_overall_results, inference_inconsistent_samples_num = get_average_overall_results(data, bon_num)
    if inference_inconsistent_samples_num > 0:
        logger.warning(f"there are {inference_inconsistent_samples_num} samples with inconsistent inferences, their inference number is not equal to {bon_num}, check them carefully!")

    summary_results['total_questions_num'] = len(average_overall_results)
    summary_results['inference_iterations'] = bon_num
    summary_results['total_samples_num'] = len(data)
    summary_results['fail_samples_num'] = fail_samples_num
    summary_results['inference_inconsistent_samples_num'] = inference_inconsistent_samples_num
    summary_results['average_overall_metric'] = calculate_overall_metrics(average_overall_results)

    # overall metric for each individual inference iteration
    for iteration in range(1, bon_num + 1):
        logger.info(f"calculate overall metric for inference iteration {iteration}")
        summary_results[f'inference_iteration_{iteration}_overall_metric'] = calculate_overall_metrics(get_inference_iteration_idx_results(data, iteration))

    # averaged metrics broken down by every configured evaluation dimension
    for dimension, sort_keys in self.evaluate_configs.items():
        summary_results[f'average_{dimension}_metric'] = calculate_dimension_metrics(average_overall_results, dimension, sort_keys)

    # best-of-n & pass@n metrics for every n up to bon_num
    for n in range(1, bon_num + 1):
        logger.info(f"calculate metrics for BoN-{n} & pass@{n} metrics")
        bon_summary: Dict[str, Any] = {}

        # one question may have multiple results; keep the best of the first n
        best_of_n_results = get_best_of_n_results(data, n)
        bon_summary['overall_metric'] = calculate_overall_metrics(best_of_n_results)
        for dimension, sort_keys in self.evaluate_configs.items():
            bon_summary[dimension] = calculate_dimension_metrics(best_of_n_results, dimension, sort_keys)

        summary_results[f'BoN-{n}'] = bon_summary
        summary_results[f'pass@{n}'] = calculate_pass_n_metrics(best_of_n_results)

    # persist the summary
    self.save_json_file(summary_results, summary_file)
    logger.info(f"metrics summary saved to: {summary_file}")
|
eval/modules/inference.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
import logging
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
import torch.multiprocessing as mp
|
| 5 |
+
import json
|
| 6 |
+
from typing import List, Dict, Any, Optional
|
| 7 |
+
|
| 8 |
+
# Configure logging
# NOTE(review): basicConfig at import time reconfigures the process-wide root
# logger; presumably intentional for this standalone eval pipeline — confirm.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
class InferenceEngine:
    """Inference engine: runs a model over the dataset across worker processes.

    Each worker handles one subset of the data and appends its scored records
    to a shared JSONL file, serialized by a multiprocessing lock.
    """

    def __init__(self, model_manager: Any, thinking_enabled: bool) -> None:
        # model_manager must expose query(prompt) -> (prediction, thinking)
        self.model_manager = model_manager
        self.thinking_enabled = thinking_enabled

    def prepare_prompt(self, item: Dict[str, Any]) -> str:
        """Build the full prompt: the long context followed by the question
        variant matching the thinking mode."""
        context = item['context']
        if self.thinking_enabled:
            question = item['question_thinking']
        else:
            question = item['question_nonthinking']
        return f"{context}\n\n\n\n{question}"

    def process_single_item(self, item: Dict[str, Any]) -> Dict[str, Any]:
        """Query the model for one item and annotate the record in place.

        The (possibly huge) context is truncated to its first 512 characters
        afterwards so the persisted record stays small.
        """
        prompt = self.prepare_prompt(item)
        prediction, thinking = self.model_manager.query(prompt)
        # a failed query may return None; normalize to an empty string
        if prediction is not None:
            prediction = prediction.strip()
        else:
            prediction = ""
        item['prediction'] = prediction
        item['thinking'] = thinking
        item['context'] = item['context'][:512]
        return item

    def process_data_subset(self, data_subset: List[Dict[str, Any]], output_file: str, file_lock: Any) -> None:
        """Process one subset sequentially, appending each result to *output_file*.

        Args:
            file_lock: a multiprocessing lock instance (``mp.Lock()``) guarding
                writes. (Annotation fixed: ``mp.Lock`` is a factory function,
                not a type, so it cannot be used as an annotation.)
        """
        for item in tqdm(data_subset, desc="process data subset"):
            result = self.process_single_item(item)
            # Use process lock to ensure atomic file writing
            with file_lock:
                with open(output_file, 'a', encoding='utf-8') as fout:
                    fout.write(json.dumps(result, ensure_ascii=False) + '\n')
                    fout.flush()

    def infer(self, data_subsets: List[List[Dict[str, Any]]], output_file: str) -> None:
        """Launch one worker process per data subset and block until all finish."""
        logger.info(f"Starting {len(data_subsets)} processes for parallel processing...")
        processes: List[mp.Process] = []
        file_lock = mp.Lock()  # shared lock serializing appends to output_file

        for data_subset in data_subsets:
            p = mp.Process(
                target=self.process_data_subset,
                args=(data_subset, output_file, file_lock)
            )
            p.start()
            processes.append(p)

        # Wait for all processes to complete
        for p in processes:
            p.join()

        logger.info("All processes completed successfully")
|
eval/modules/model_manager.py
ADDED
|
@@ -0,0 +1,769 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import time
|
| 3 |
+
import logging
|
| 4 |
+
from typing import Optional, Dict, Any, Callable, Tuple
|
| 5 |
+
from transformers import AutoTokenizer
|
| 6 |
+
from openai import OpenAI
|
| 7 |
+
import requests
|
| 8 |
+
from functools import wraps
|
| 9 |
+
import os
|
| 10 |
+
|
| 11 |
+
# configure logging
# NOTE(review): basicConfig at import time reconfigures the process-wide root
# logger (same pattern as the other eval modules) — presumably intentional.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
def model_query_decorator(func: Callable) -> Callable:
    """
    model query decorator, handle common pre-processing and retry logic
    get max_tries and time_sleep from self instance dynamically

    The wrapped method must return (answer, thinking); on an empty prompt or
    after exhausting all retries the wrapper returns ("", None).
    (Annotation fixed: the wrapper returns a tuple, not a bare str.)
    """
    @wraps(func)
    def wrapper(self, prompt: str) -> Tuple[str, Optional[str]]:
        # pre-processing: check empty prompt
        if not prompt or not prompt.strip():
            logger.warning("Empty prompt, return empty string")
            return "", None

        # pre-processing: truncate prompt to the instance's token budget
        processed_prompt = self._truncate_prompt(prompt.strip())

        # get retry parameters from self instance
        max_tries = getattr(self, 'max_tries', 5)
        time_sleep = getattr(self, 'time_sleep', 1.0)

        # fixed pacing delay before the first attempt (rate limiting)
        time.sleep(time_sleep)

        # retry mechanism
        last_exception = None

        for attempt in range(1, max_tries + 1):
            try:
                logger.info(f"Try {attempt} times...")

                # call the specific query method
                answer, thinking = func(self, processed_prompt)

                logger.info(f"Query successful, try {attempt} times")

                return answer, thinking

            except KeyboardInterrupt:
                # never swallow a user interrupt — re-raise immediately
                logger.info("User interrupt")
                raise
            except Exception as e:
                last_exception = e
                logger.warning(f"API error (try {attempt}/{max_tries}): {e}")

                # exponential backoff strategy: time_sleep * 2^(attempt-1)
                if attempt < max_tries:
                    sleep_time = time_sleep * (2 ** (attempt - 1))
                    logger.info(f"Wait {sleep_time:.1f} seconds and retry...")
                    time.sleep(sleep_time)

        # all tries failed
        logger.error(f"All {max_tries} tries failed, last error: {last_exception}")
        return "", None

    return wrapper
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class ModelManagerBase:
    """Base model manager.

    Holds the connection and generation settings shared by every backend and
    provides prompt truncation; subclasses implement query() for one API.
    """

    def __init__(
        self,
        tokenizer_path: str,
        context_max_length: int,
        url: str,
        api_key: str,
        temperature: float,
        max_new_tokens: int,
        timeout: int,
        max_tries: int,
        time_sleep: float,
    ):
        # parameter validation
        if not os.path.exists(tokenizer_path):
            raise ValueError("tokenizer_path is not found")
        if context_max_length <= 0:
            raise ValueError("context_max_length must be greater than 0")
        if max_tries <= 0:
            raise ValueError("max_tries must be greater than 0")

        self.tokenizer_path = tokenizer_path
        self.context_max_length = context_max_length  # prompt token budget
        self.url = url
        self.api_key = api_key
        self.temperature = temperature
        self.max_new_tokens = max_new_tokens
        self.timeout = timeout        # per-request timeout in seconds
        self.max_tries = max_tries    # retry budget read by model_query_decorator
        self.time_sleep = time_sleep  # base delay for pacing and backoff
        self.tokenizer = self._get_tokenizer()

    def _get_tokenizer(self) -> AutoTokenizer:
        """Get tokenizer (HuggingFace, used only for length accounting)."""
        try:
            return AutoTokenizer.from_pretrained(
                self.tokenizer_path,
                trust_remote_code=True
            )
        except Exception as e:
            logger.error(f"Failed to load tokenizer: {e}")
            raise

    def _truncate_prompt(self, prompt: str) -> str:
        """Truncate prompt, keep important parts (the head and the tail).

        Prompts within the token budget are returned unchanged; over-long
        prompts have their middle dropped.
        """
        input_ids = self.tokenizer.encode(prompt)

        if len(input_ids) <= self.context_max_length:
            return prompt

        # head keeps floor(n/2) tokens; the tail slice uses -(n//2), which for
        # odd n keeps ceil(n/2) tokens — so head + tail == context_max_length.
        truncated_input_ids = input_ids[:self.context_max_length//2] + input_ids[-self.context_max_length//2:]

        truncated_prompt = self.tokenizer.decode(
            truncated_input_ids,
            skip_special_tokens=True
        )

        return truncated_prompt

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model; subclasses return (answer, thinking).
        (Annotation fixed: the decorated contract is a tuple, not str.)"""
        raise NotImplementedError("Subclass must implement this method")
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
class ModelManagerMagistral(ModelManagerBase):
    """Magistral model manager (OpenAI-compatible endpoint with the model's
    recommended reasoning system prompt)."""

    def __init__(
        self,
        model_name: str,
        tokenizer_path: str = "",
        context_max_length: int = 120000, # 128k - 8k
        url: str = "http://127.0.0.1:8000/v1",
        api_key: str = "EMPTY",
        temperature: float = 0.7,
        max_new_tokens: int = 8192,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
        extra_body: Optional[Dict[str, Any]] = None,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.model_name = model_name
        self.extra_body = extra_body or {}
        self.client = self._create_client()
        # Magistral's official reasoning system prompt — keep verbatim.
        self.system_prompt = """First draft your thinking process (inner monologue) until you arrive at a response. Format your response using Markdown, and use LaTeX for any mathematical equations. Write both your thoughts and the response in the same language as the input.\n\nYour thinking process must follow the template below:[THINK]Your thoughts or/and draft, like working through an exercise on scratch paper. Be as casual and as long as you want until you are confident to generate the response. Use the same language as the input.[/THINK]Here, provide a self-contained response."""

    def _create_client(self) -> OpenAI:
        """Create Magistral client"""
        try:
            return OpenAI(
                base_url=self.url,
                api_key=self.api_key
            )
        except Exception as e:
            logger.error(f"Failed to create OpenAI client: {e}")
            raise

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model - only handle OpenAI specific logic.

        Returns (answer, reasoning-or-None); truncation and retries are
        handled by model_query_decorator.
        """
        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": processed_prompt}
            ],
            temperature=self.temperature,
            extra_body=self.extra_body,
            max_tokens=self.max_new_tokens,
            timeout=self.timeout,
        )
        answer = completion.choices[0].message.content
        # Bug fix: the previous bare `except:` also swallowed KeyboardInterrupt/
        # SystemExit. Not every backend sets reasoning_content, so probe for it.
        thinking = getattr(completion.choices[0].message, "reasoning_content", None)
        return answer, thinking
|
| 195 |
+
|
| 196 |
+
class ModelManagerOpenAI(ModelManagerBase):
    """OpenAI-compatible model manager (plain chat completion, no system prompt)."""

    def __init__(
        self,
        model_name: str,
        tokenizer_path: str = "model/Tokenizers/qwen",
        context_max_length: int = 120000, # 128k - 8k
        url: str = "http://127.0.0.1:8000/v1",
        api_key: str = "EMPTY",
        temperature: float = 1.0,
        max_new_tokens: int = 8192,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
        extra_body: Optional[Dict[str, Any]] = None,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.model_name = model_name
        self.extra_body = extra_body or {}
        self.client = self._create_client()

    def _create_client(self) -> OpenAI:
        """Create OpenAI client"""
        try:
            return OpenAI(
                base_url=self.url,
                api_key=self.api_key
            )
        except Exception as e:
            logger.error(f"Failed to create OpenAI client: {e}")
            raise

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model - only handle OpenAI specific logic.

        Returns (answer, reasoning-or-None); truncation and retries are
        handled by model_query_decorator.
        """
        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=[{"role": "user", "content": processed_prompt}],
            temperature=self.temperature,
            extra_body=self.extra_body,
            max_tokens=self.max_new_tokens,
            timeout=self.timeout,
        )
        answer = completion.choices[0].message.content
        # Bug fix: the previous bare `except:` also swallowed KeyboardInterrupt/
        # SystemExit. Not every backend sets reasoning_content, so probe for it.
        thinking = getattr(completion.choices[0].message, "reasoning_content", None)
        return answer, thinking
|
| 247 |
+
|
| 248 |
+
class ModelManagerGemini3(ModelManagerBase):
    """Gemini 3.0 model manager (REST generateContent endpoint)."""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/gemini",
        context_max_length: int = 1000000, # 1M
        url: str = "https://runway.devops.rednote.life/openai/google/v1:generateContent",
        # SECURITY: hardcoded credential committed to source — move to an
        # environment variable / secret store and rotate this key.
        api_key: str = "162420e2621c480d9f8ab1bb7b8c4c91",
        temperature: float = 1.0, # default 1.0
        max_new_tokens: int = 32768,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model - only handle Gemini specific logic"""
        # prepare payload
        payload = json.dumps({
            "contents": [
                {
                    "role": "user",
                    "parts": [
                        {
                            "text": processed_prompt
                        }
                    ]
                }
            ],
            "generationConfig": {
                "maxOutputTokens": self.max_new_tokens,
                "temperature": self.temperature,
                "thinkingConfig": {
                    "thinking_level": "high" # default "high"
                }
            }
        })

        # send request
        # Bug fix: pass timeout — previously self.timeout was never used, so a
        # hung request blocked forever and the decorator's retries never fired.
        response = requests.request("POST", self.url, headers=self.headers, data=payload, timeout=self.timeout).json()

        # extract result (no reasoning channel is read -> thinking is None)
        return response["candidates"][0]["content"]["parts"][0]["text"], None
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
class ModelManagerGemini25(ModelManagerBase):
    """Gemini 2.5 thinking model manager (REST generateContent endpoint,
    auto thinking budget)."""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/gemini",
        context_max_length: int = 1000000, # 1M
        url: str = "https://runway.devops.rednote.life/openai/google/v1:generateContent",
        # SECURITY: hardcoded credential committed to source — move to an
        # environment variable / secret store and rotate this key.
        api_key: str = "66c251052f44452a834ce83d0c7fd3ba",
        temperature: float = 1.0, # default 1.0
        max_new_tokens: int = 32768,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model - only handle Gemini specific logic"""
        # prepare payload
        payload = json.dumps({
            "contents": [
                {
                    "role": "user",
                    "parts": [
                        {
                            "text": processed_prompt
                        }
                    ]
                }
            ],
            "generationConfig": {
                "maxOutputTokens": self.max_new_tokens,
                "temperature": self.temperature,
                "thinkingConfig": {
                    "thinkingBudget": -1 # enable auto thinking
                }
            }
        })

        # send request
        # Bug fix: pass timeout — previously self.timeout was never used, so a
        # hung request blocked forever and the decorator's retries never fired.
        response = requests.request("POST", self.url, headers=self.headers, data=payload, timeout=self.timeout).json()

        # extract result (no reasoning channel is read -> thinking is None)
        return response["candidates"][0]["content"]["parts"][0]["text"], None
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
class ModelManagerGemini25FlashNonthinking(ModelManagerBase):
    """Gemini 2.5 Flash nonthinking model manager (REST generateContent
    endpoint, thinking disabled via a zero thinking budget)."""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/gemini",
        context_max_length: int = 1000000, # 1M
        url: str = "https://runway.devops.rednote.life/openai/google/v1:generateContent",
        # SECURITY: hardcoded credential committed to source — move to an
        # environment variable / secret store and rotate this key.
        api_key: str = "66c251052f44452a834ce83d0c7fd3ba",
        temperature: float = 1.0, # default 1.0
        max_new_tokens: int = 1024,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model - only handle Gemini specific logic"""
        # prepare payload
        payload = json.dumps({
            "contents": [
                {
                    "role": "user",
                    "parts": [
                        {
                            "text": processed_prompt
                        }
                    ]
                }
            ],
            "generationConfig": {
                "maxOutputTokens": self.max_new_tokens,
                "temperature": self.temperature,
                "thinkingConfig": {
                    "thinkingBudget": 0 # disable thinking
                }
            }
        })

        # send request
        # Bug fix: pass timeout — previously self.timeout was never used, so a
        # hung request blocked forever and the decorator's retries never fired.
        response = requests.request("POST", self.url, headers=self.headers, data=payload, timeout=self.timeout).json()

        # extract result (no reasoning channel is read -> thinking is None)
        return response["candidates"][0]["content"]["parts"][0]["text"], None
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
class ModelManagerGPT5(ModelManagerBase):
    """GPT5 model manager (Azure-style REST chat completions endpoint)."""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/gpt",
        context_max_length: int = 262144, # 256k
        url: str = "https://runway.devops.rednote.life/openai/chat/completions?api-version=2025-01-01-preview",
        # SECURITY: hardcoded credential committed to source — move to an
        # environment variable / secret store and rotate this key.
        api_key: str = "9a7403aa383e4a44a8c0f852710630e0",
        temperature: float = 1.0, # only support 1.0
        max_new_tokens: int = 32768,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model - only handle GPT5 specific logic"""
        payload = json.dumps({
            "messages":[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ],
            # GPT5 uses max_completion_tokens instead of max_tokens
            "max_completion_tokens": self.max_new_tokens,
            "temperature": self.temperature,
        })

        # send request
        # Bug fix: pass timeout — previously self.timeout was never used, so a
        # hung request blocked forever and the decorator's retries never fired.
        response = requests.request("POST", self.url, headers=self.headers, data=payload, timeout=self.timeout).json()

        # extract answer (no reasoning channel is read -> thinking is None)
        return response["choices"][0]["message"]["content"], None
|
| 453 |
+
|
| 454 |
+
class ModelManagerGPT4o(ModelManagerBase):
    """GPT4o model manager.

    Sends a chat-completions request to the configured Azure-OpenAI-style
    endpoint and returns the assistant message text.
    """

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/gpt",
        context_max_length: int = 120000,  # 128k - 8k
        url: str = "https://runway.devops.rednote.life/openai/chat/completions?api-version=2025-01-01-preview",
        api_key: str = "9d876c24a1d74e218e69339258db13a3",
        temperature: float = 1.0,
        max_new_tokens: int = 7168,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        # NOTE(review): API key is hard-coded in source; it should be moved to
        # configuration / environment variables.
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model - only handle GPT4o specific logic.

        Returns (answer_text, None) — GPT4o has no thinking channel.
        """
        payload = json.dumps({
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ],
            "max_tokens": self.max_new_tokens,
            "temperature": self.temperature,
        })

        # Fix: honor the configured timeout so a hung connection cannot block
        # the worker forever (the original request passed no timeout at all).
        response = requests.post(self.url, headers=self.headers,
                                 data=payload, timeout=self.timeout).json()

        # extract answer
        return response["choices"][0]["message"]["content"], None
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
class ModelManagerClaude4(ModelManagerBase):
    """Claude 4 thinking model manager (Bedrock invoke API, 1M-context beta)."""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/claude",
        context_max_length: int = 1000000,  # 1M
        url: str = "https://runway.devops.rednote.life/openai/bedrock_runtime/model/invoke",
        api_key: str = "899efa27c7c74654bb561242e1a0e423",
        temperature: float = 1.0,  # only support 1.0
        max_new_tokens: int = 32768,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        # NOTE(review): API key is hard-coded in source; it should be moved to
        # configuration / environment variables.
        self.headers = {
            'token': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model - only handle Claude4 specific logic.

        Returns (answer_text, thinking_text).
        """
        payload = json.dumps({
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": self.max_new_tokens,
            "temperature": self.temperature,
            "anthropic_beta": ["context-1m-2025-08-07"],  # support 1M context
            "thinking": {
                "type": "enabled",
                "budget_tokens": self.max_new_tokens - 1024
            },
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ]
        })

        # Fix: honor the configured timeout (original call could hang forever).
        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        # Fix: select content blocks by type instead of assuming thinking is
        # always at index 0 and the answer at index 1 — the Messages API does
        # not guarantee block order (e.g. redacted_thinking blocks may appear).
        blocks = response["content"]
        answer = next(b["text"] for b in blocks if b.get("type") == "text")
        thinking = next((b["thinking"] for b in blocks if b.get("type") == "thinking"), None)
        return answer, thinking
|
| 555 |
+
|
| 556 |
+
class ModelManagerClaude4Nonthinking(ModelManagerBase):
    """Claude 4 nonthinking model manager (Bedrock invoke API, 1M-context beta)."""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/claude",
        context_max_length: int = 1000000,  # 1M
        url: str = "https://runway.devops.rednote.life/openai/bedrock_runtime/model/invoke",
        api_key: str = "899efa27c7c74654bb561242e1a0e423",
        temperature: float = 1.0,  # only support 1.0
        max_new_tokens: int = 1024,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        # NOTE(review): API key is hard-coded in source; it should be moved to
        # configuration / environment variables.
        self.headers = {
            'token': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model - only handle Claude4 specific logic.

        Returns (answer_text, None) — thinking is disabled for this manager.
        """
        payload = json.dumps({
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": self.max_new_tokens,
            "temperature": self.temperature,
            "anthropic_beta": ["context-1m-2025-08-07"],  # support 1M context
            "thinking": {
                "type": "disabled",
            },
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ]
        })

        # Fix: honor the configured timeout (original call could hang forever).
        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        # Fix: select the text block by type rather than fixed index 0.
        answer = next(b["text"] for b in response["content"] if b.get("type") == "text")
        return answer, None
|
| 608 |
+
|
| 609 |
+
|
| 610 |
+
class ModelManagerClaude37(ModelManagerBase):
    """Claude 3.7 thinking model manager (Bedrock invoke API)."""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/claude",
        context_max_length: int = 200000,
        url: str = "https://runway.devops.rednote.life/openai/bedrock_runtime/model/invoke",
        api_key: str = "ff15724dce4d4c1e95939efd2f40628f",
        temperature: float = 1.0,  # only support 1.0
        max_new_tokens: int = 32768,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        # NOTE(review): API key is hard-coded in source; it should be moved to
        # configuration / environment variables.
        self.headers = {
            'token': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model - only handle Claude 3.7 specific logic.

        Returns (answer_text, thinking_text).  (Fix: docstring previously
        said "Claude4".)
        """
        payload = json.dumps({
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": self.max_new_tokens,
            "temperature": self.temperature,
            "thinking": {
                "type": "enabled",
                # NOTE(review): Anthropic documents a minimum budget of 1024
                # tokens; max_new_tokens - 200 is fine for the default 32768
                # but would be rejected for small max_new_tokens — confirm.
                "budget_tokens": self.max_new_tokens - 200
            },
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ]
        })

        # Fix: honor the configured timeout (original call could hang forever).
        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        # Fix: select content blocks by type instead of fixed indices [1]/[0].
        blocks = response["content"]
        answer = next(b["text"] for b in blocks if b.get("type") == "text")
        thinking = next((b["thinking"] for b in blocks if b.get("type") == "thinking"), None)
        return answer, thinking
|
| 662 |
+
|
| 663 |
+
class ModelManagerClaude37Nonthinking(ModelManagerBase):
    """Claude 3.7 nonthinking model manager (Bedrock invoke API)."""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/claude",
        context_max_length: int = 200000,
        url: str = "https://runway.devops.rednote.life/openai/bedrock_runtime/model/invoke",
        api_key: str = "ff15724dce4d4c1e95939efd2f40628f",
        temperature: float = 1.0,  # only support 1.0
        max_new_tokens: int = 1024,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        # NOTE(review): API key is hard-coded in source; it should be moved to
        # configuration / environment variables.
        self.headers = {
            'token': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model - only handle Claude 3.7 specific logic.

        Returns (answer_text, None) — thinking is disabled for this manager.
        (Fix: docstring previously said "Claude4".)
        """
        payload = json.dumps({
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": self.max_new_tokens,
            "temperature": self.temperature,
            "thinking": {
                "type": "disabled",
            },
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ]
        })

        # Fix: honor the configured timeout (original call could hang forever).
        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        # Fix: select the text block by type rather than fixed index 0.
        answer = next(b["text"] for b in response["content"] if b.get("type") == "text")
        return answer, None
|
| 714 |
+
|
| 715 |
+
class ModelManagerKimi(ModelManagerBase):
    """Kimi (Moonshot) model manager."""

    def __init__(
        self,
        tokenizer_path: str = "/cpfs/user/chengfeng/huggingface/models/moonshotai/Kimi-K2-Instruct",
        context_max_length: int = 224000,  # 256k - 32k
        url: str = "https://runway.devops.xiaohongshu.com/openai/moonshot/v1/chat/completions",
        api_key: str = "ea70f961e2e94024b0e8a2037ae9b477",
        temperature: float = 0.6,
        max_new_tokens: int = 32768,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        # NOTE(review): API key is hard-coded in source; it should be moved to
        # configuration / environment variables.
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query LLM model - only handle Kimi specific logic.

        Returns (answer_text, None).
        """
        payload = json.dumps({
            "model": "kimi-k2-0905-preview",
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ],
            "max_tokens": self.max_new_tokens,
            "temperature": self.temperature
        })

        # Fix: "timeout" is not a chat-completions request-body parameter; it
        # belongs on the HTTP call itself so a hung connection cannot block
        # the worker forever.  The original put it in the JSON payload, where
        # it had no effect on the actual network timeout.
        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        # extract answer
        return response["choices"][0]["message"]["content"], None
|
| 763 |
+
|
| 764 |
+
if __name__ == "__main__":
    # Smoke test: instantiate the Gemini manager and issue a single query.
    # The thinking channel is expected to be None (Gemini thinking disabled).
    model_manager = ModelManagerGemini3()
    answer, thinking = model_manager.query("Hello, how are you?")
    print("Answer:", answer)
    print("Thinking:", thinking)
|
| 769 |
+
|
eval/modules/utils.py
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Dict, Set, Optional
|
| 2 |
+
from collections import defaultdict
|
| 3 |
+
import jieba
|
| 4 |
+
from rouge import Rouge
|
| 5 |
+
import pytrec_eval
|
| 6 |
+
from itertools import combinations
|
| 7 |
+
|
| 8 |
+
'''
|
| 9 |
+
normalize text
|
| 10 |
+
'''
|
| 11 |
+
|
| 12 |
+
def get_answer_area(text: str) -> str:
    """Return the text following the last answer marker ("[Answer]" or "[答案]").

    If neither marker is present, the whole input is returned stripped.
    Fix: when both markers occur, the one appearing *last* in the text now
    wins.  The original always preferred "[Answer]", so a response ending
    with "[答案] ..." after an earlier "[Answer]" kept the stale section.
    """
    best_start = -1
    best_len = 0
    for marker in ("[Answer]", "[答案]"):
        pos = text.rfind(marker)
        if pos > best_start:
            best_start = pos
            best_len = len(marker)  # replaces the magic offsets 8 / 4
    if best_start != -1:
        text = text[best_start + best_len:]
    return text.strip()
|
| 23 |
+
|
| 24 |
+
def lower(text: str) -> str:
    """Lower-case *text* (thin wrapper kept so the pipeline reads uniformly)."""
    return str.lower(text)
|
| 26 |
+
|
| 27 |
+
def split_by_new_line(text: str) -> List[str]:
    """Split *text* on "\\n" only (keeps empty segments, unlike splitlines)."""
    newline = "\n"
    return text.split(newline)
|
| 29 |
+
|
| 30 |
+
def fix_space(text: str) -> str:
    """Collapse every run of whitespace to a single space and trim the ends.

    Rationale (from the original author):
    1. Spaces cannot simply be deleted: "1 11" != "11 1" but "111" == "111".
    2. One sample's answers contained multiple consecutive spaces; collapsing
       them has a very low probability of affecting the judgment.
    """
    tokens = text.split()
    return " ".join(tokens)
|
| 36 |
+
|
| 37 |
+
def normalize_answers(answers: List[str]) -> List[str]:
    """Lower-case and whitespace-normalize every gold answer string."""
    # " ".join(s.split()) is exactly fix_space(lower(a).strip()) inlined:
    # split() ignores leading/trailing whitespace, so the strip is implicit.
    return [" ".join(answer.lower().split()) for answer in answers]
|
| 39 |
+
|
| 40 |
+
def normalize_prediction(prediction: str) -> List[str]:
    """Extract the answer area, lower-case it, and normalize each line."""
    answer_area = get_answer_area(prediction)
    lines = lower(answer_area).split("\n")
    return [fix_space(line.strip()) for line in lines]
|
| 42 |
+
|
| 43 |
+
def normalize_prediction_abstract(abstract: str) -> str:
    """Lower-case and whitespace-normalize a free-form abstract/summary."""
    # Equivalent to fix_space(lower(abstract).strip()), inlined.
    return " ".join(abstract.lower().split())
|
| 45 |
+
|
| 46 |
+
'''
|
| 47 |
+
metrics
|
| 48 |
+
'''
|
| 49 |
+
def Accuracy(answers: List[str], prediction: str) -> float:
    """1.0 iff the first normalized prediction line equals the first gold answer.

    Only the first element of each side is compared; empty inputs score 0.0.
    """
    norm_answers = normalize_answers(answers)
    norm_predictions = normalize_prediction(prediction)

    if not norm_answers or not norm_predictions:
        return 0.0

    return 1.0 if norm_answers[0] == norm_predictions[0] else 0.0
|
| 60 |
+
|
| 61 |
+
def F1_Score(answers: List[str], prediction: str) -> float:
    """Set-level F1 between normalized answer lines and prediction lines.

    Duplicates collapse (set semantics); empty sets or no overlap score 0.0.
    """
    answer_set: Set[str] = set(normalize_answers(answers))
    prediction_set: Set[str] = set(normalize_prediction(prediction))

    overlap = answer_set & prediction_set
    if not overlap or not answer_set or not prediction_set:
        return 0.0

    precision = len(overlap) / len(prediction_set)
    recall = len(overlap) / len(answer_set)

    # overlap is non-empty here, so precision + recall > 0 — no zero guard
    # is needed (the original's precision+recall==0 branch was unreachable).
    return 2 * precision * recall / (precision + recall)
|
| 80 |
+
|
| 81 |
+
def SubEM(answers: List[str], prediction: str) -> float:
    """Fraction of gold answers that appear verbatim among the prediction lines.

    NOTE(review): `a in predictions` is *list membership* (exact line match),
    not substring containment — confirm this matches the intended "SubEM"
    semantics.
    """
    norm_answers = normalize_answers(answers)
    norm_predictions = normalize_prediction(prediction)

    if not norm_answers or not norm_predictions:
        return 0.0

    hits = sum(1.0 for a in norm_answers if a in norm_predictions)
    return hits / len(norm_answers)
|
| 93 |
+
|
| 94 |
+
# Rouge: https://github.com/pltrdy/rouge
|
| 95 |
+
# Rouge: https://github.com/pltrdy/rouge
def Summary_Max_Rouge_L(answers: List[str], prediction: str, is_zh: bool) -> float:
    """Maximum Rouge-L F1 of *prediction* against any reference answer.

    Chinese text is word-segmented with jieba first, because the rouge
    package expects whitespace-tokenized input.  Returns 0.0 when rouge
    cannot score (e.g. empty hypothesis or reference raises ValueError).
    """
    if is_zh:
        answers = [" ".join(jieba.cut(a, cut_all=False)) for a in answers]
        prediction = " ".join(jieba.cut(prediction, cut_all=False))

    rouge_evaluator = Rouge()
    try:
        scores = rouge_evaluator.get_scores([prediction] * len(answers), answers, avg=False)
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return 0.0

    return max(score["rouge-l"]["f"] for score in scores)
|
| 107 |
+
|
| 108 |
+
def Summary_Max_Semantic_Similarity(Embedding_Model, answers: List[str], prediction: str) -> float:
    """Maximum cosine similarity between the prediction and any reference answer.

    Assumes *Embedding_Model* exposes sentence-transformers-style
    ``encode``/``similarity`` returning a torch tensor — TODO confirm.
    """
    answer_embeddings = Embedding_Model.encode(answers)
    prediction_embedding = Embedding_Model.encode([prediction])

    # Similarity matrix has shape (len(answers), 1); take the best match.
    similarity = Embedding_Model.similarity(answer_embeddings, prediction_embedding)
    return float(similarity.max().cpu().numpy())
|
| 115 |
+
|
| 116 |
+
def Summary(Embedding_Model, answers: List[str], prediction: str, is_zh: bool, alpha: float = 0.5, beta: float = 0.5) -> float:
    """Weighted blend of semantic similarity and Rouge-L for summaries.

    score = alpha * max-semantic-similarity + beta * max-Rouge-L.
    Empty answers or an empty normalized prediction score 0.0.
    """
    norm_answers = normalize_answers(answers)
    norm_prediction = normalize_prediction_abstract(prediction)

    if not norm_answers or not norm_prediction:
        return 0.0

    semantic = Summary_Max_Semantic_Similarity(Embedding_Model, norm_answers, norm_prediction)
    lexical = Summary_Max_Rouge_L(norm_answers, norm_prediction, is_zh)
    return alpha * semantic + beta * lexical
|
| 124 |
+
|
| 125 |
+
# NDCG@k: https://github.com/beir-cellar/beir/blob/f062f038c4bfd19a8ca942a9910b1e0d218759d4/beir/retrieval/evaluation.py#L67 use pytrec_eval
|
| 126 |
+
def NDCG(answers: List[str], prediction: str) -> float:
    """NDCG@k of the predicted ranking against the gold ranking, k = len(answers).

    Both sides are turned into pytrec_eval run/qrel dicts under a single
    synthetic query id; graded relevance is position-based (earlier = higher).
    NOTE(review): duplicate normalized lines collapse into one dict key,
    keeping only the last occurrence's score — confirm inputs are unique.
    """
    answers: List[str] = normalize_answers(answers)
    predictions: List[str] = normalize_prediction(prediction)

    if len(answers) == 0 or len(predictions) == 0:
        return 0.0

    # Cutoff equals the number of gold items.
    k_value = len(answers)

    # qrels: gold item -> graded relevance (first item gets the highest grade)
    answers = {
        'query': {a: len(answers) - i for i, a in enumerate(answers)}
    }
    # run: predicted item -> retrieval score (descending by predicted rank)
    predictions = {
        'query': {p: len(predictions) - i for i, p in enumerate(predictions)}
    }

    ndcg = 0.0
    ndcg_string = "ndcg_cut." + str(k_value)
    evaluator = pytrec_eval.RelevanceEvaluator(answers, {ndcg_string})
    scores = evaluator.evaluate(predictions)

    # Only one query here, but average for generality.
    for query_id in scores.keys():
        ndcg += scores[query_id]["ndcg_cut_" + str(k_value)]

    ndcg = ndcg / len(scores)

    return ndcg
|
| 153 |
+
|
| 154 |
+
def Pairwise_Accuracy(answers: List[str], prediction: str) -> float:
    """Fraction of gold ordered pairs whose relative order the prediction keeps.

    The denominator is the number of *prediction* pairs, C(len(predictions), 2);
    a gold pair counts as correct only when both items appear in the
    prediction in the gold order.  Fewer than two items on either side
    scores 0.0.
    """
    norm_answers = normalize_answers(answers)
    norm_predictions = normalize_prediction(prediction)

    if len(norm_answers) < 2 or len(norm_predictions) < 2:
        return 0.0

    n_pred = len(norm_predictions)
    total_pairs = n_pred * (n_pred - 1) // 2
    position = {p: i for i, p in enumerate(norm_predictions)}

    kept = 0
    for first, second in combinations(norm_answers, 2):
        if first in position and second in position and position[first] < position[second]:
            kept += 1

    return kept / total_pairs
|
| 171 |
+
|
| 172 |
+
'''
|
| 173 |
+
calculate metrics
|
| 174 |
+
'''
|
| 175 |
+
def get_average_overall_results(data, bon_num):
    """Average the 'metric' of each question over its inference iterations.

    Groups samples by 'id', averages their 'metric' into a copy of the
    group's first item, and counts groups whose sample count differs from
    *bon_num*.  Returns (averaged_items, n_inconsistent).
    """
    grouped = defaultdict(list)
    for sample in data:
        grouped[sample['id']].append(sample)

    averaged = []
    n_inconsistent = 0
    for group in grouped.values():
        if len(group) != bon_num:
            n_inconsistent += 1
        representative = group[0].copy()
        representative['metric'] = sum(s['metric'] for s in group) / len(group)
        averaged.append(representative)

    return averaged, n_inconsistent
|
| 190 |
+
|
| 191 |
+
def get_inference_iteration_idx_results(data, inference_iteration_idx):
    """Return only the samples whose 'bon_idx' equals the given iteration index."""
    return [sample for sample in data if sample['bon_idx'] == inference_iteration_idx]
|
| 199 |
+
|
| 200 |
+
def get_best_of_n_results(data, bon_num):
    """Keep, per question id, the highest-'metric' sample among bon_idx <= bon_num.

    Ties keep the earliest sample (strict '>' comparison, as before).
    """
    best = {}
    for sample in data:
        if sample['bon_idx'] > bon_num:
            continue
        incumbent = best.get(sample['id'])
        if incumbent is None or sample['metric'] > incumbent['metric']:
            best[sample['id']] = sample
    return list(best.values())
|
| 209 |
+
|
| 210 |
+
def calculate_pass_n_metrics(best_of_n_results):
    """
    calculate pass@n metrics
    Summary Threshold = 0.5 * 0.5(Rouge-L) + 0.5 * 0.8(Semantic Similarity) = 0.65
    if the summary score is greater than 0.65, it is considered to pass;
    all other tasks require an exact metric of 1.0.
    """
    # Fix: guard against an empty result list (original raised ZeroDivisionError).
    if not best_of_n_results:
        return 0.0

    pass_sample_num = 0
    for result in best_of_n_results:
        if 'T4' in result['primary_task']:
            # T4 (summarization) uses the blended-score threshold
            passed = result['metric'] > 0.65
        else:
            passed = result['metric'] == 1.0
        if passed:
            pass_sample_num += 1

    return pass_sample_num / len(best_of_n_results)
|
| 225 |
+
|
| 226 |
+
def calculate_overall_metrics(metric_results):
    """Mean of the 'metric' field across all results; 0.0 for empty input."""
    if not metric_results:
        return 0.0
    total = sum(result['metric'] for result in metric_results)
    return total / len(metric_results)
|
| 233 |
+
|
| 234 |
+
def calculate_dimension_metrics(metric_results, dimension, sort_keys):
    """Mean 'metric' grouped by metric_result[dimension], ordered by sort_keys.

    Fix: sort keys that never occur in the data are now skipped — the
    original raised KeyError when a dimension value was absent (e.g. a task
    category with no samples in a filtered run).
    """
    dimension_groups = defaultdict(list)
    for metric_result in metric_results:
        dimension_groups[metric_result[dimension]].append(metric_result['metric'])

    averages = {value: sum(metrics) / len(metrics)
                for value, metrics in dimension_groups.items()}

    # Emit in the caller-requested order, tolerating missing keys.
    return {key: averages[key] for key in sort_keys if key in averages}
|
eval/output/Claude-3.7-Sonnet/nonthinking_context-120000_bon-3_summary.json
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"date": "2025-12-08",
|
| 3 |
+
"total_questions_num": 1500,
|
| 4 |
+
"inference_iterations": 3,
|
| 5 |
+
"total_samples_num": 4500,
|
| 6 |
+
"fail_samples_num": 0,
|
| 7 |
+
"inference_inconsistent_samples_num": 0,
|
| 8 |
+
"average_overall_metric": 0.5144730485997339,
|
| 9 |
+
"inference_iteration_1_overall_metric": 0.5192628714494713,
|
| 10 |
+
"inference_iteration_2_overall_metric": 0.5090899475543829,
|
| 11 |
+
"inference_iteration_3_overall_metric": 0.515066326795347,
|
| 12 |
+
"average_token_length_metric": {
|
| 13 |
+
"8k": 0.5927607589235461,
|
| 14 |
+
"16k": 0.5922491004183165,
|
| 15 |
+
"32k": 0.5555486925170308,
|
| 16 |
+
"64k": 0.4991997081584744,
|
| 17 |
+
"128k": 0.45285894052515324,
|
| 18 |
+
"256k": 0.39422109105588254
|
| 19 |
+
},
|
| 20 |
+
"average_contextual_requirement_metric": {
|
| 21 |
+
"Full": 0.47584256909012274,
|
| 22 |
+
"Partial": 0.5636391134301498
|
| 23 |
+
},
|
| 24 |
+
"average_difficulty_metric": {
|
| 25 |
+
"Easy": 0.6868572950582806,
|
| 26 |
+
"Moderate": 0.48375113564429373,
|
| 27 |
+
"Hard": 0.4728683670759167,
|
| 28 |
+
"Extreme": 0.3731393645349295
|
| 29 |
+
},
|
| 30 |
+
"average_primary_task_metric": {
|
| 31 |
+
"T1. Retrieval & Ranking": 0.7586982224527067,
|
| 32 |
+
"T2. Sequencing & Structure Reconstruction": 0.7545327049493711,
|
| 33 |
+
"T3. Evidence-Grounded QA": 0.5277777777777779,
|
| 34 |
+
"T4. Summarization & Synthesis": 0.5250996637138268,
|
| 35 |
+
"T5. Attribution & Citation Alignment": 0.5254132304220211,
|
| 36 |
+
"T6. Aggregation & Clustering": 0.47394883159992857,
|
| 37 |
+
"T7. Consistency & Compliance Checking": 0.3040021982475052,
|
| 38 |
+
"T8. Structured & Numeric Reasoning": 0.41188271604938276,
|
| 39 |
+
"T9. Version & Code Diff Analysis": 0.6042705189653765,
|
| 40 |
+
"T10. Rule Induction & In-Context Learning": 0.5114814814814815,
|
| 41 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.43888888888888894
|
| 42 |
+
},
|
| 43 |
+
"average_language_metric": {
|
| 44 |
+
"Chinese": 0.5100199196277736,
|
| 45 |
+
"English": 0.5189261775716956
|
| 46 |
+
},
|
| 47 |
+
"BoN-1": {
|
| 48 |
+
"overall_metric": 0.5192628714494713,
|
| 49 |
+
"token_length": {
|
| 50 |
+
"8k": 0.5937761874854375,
|
| 51 |
+
"16k": 0.606154781504802,
|
| 52 |
+
"32k": 0.5701163293545726,
|
| 53 |
+
"64k": 0.49747085680734393,
|
| 54 |
+
"128k": 0.4476635155122931,
|
| 55 |
+
"256k": 0.40039555803238175
|
| 56 |
+
},
|
| 57 |
+
"contextual_requirement": {
|
| 58 |
+
"Full": 0.47604900489990065,
|
| 59 |
+
"Partial": 0.5742623379671082
|
| 60 |
+
},
|
| 61 |
+
"difficulty": {
|
| 62 |
+
"Easy": 0.6993477849390543,
|
| 63 |
+
"Moderate": 0.4824572359285609,
|
| 64 |
+
"Hard": 0.47426941765067004,
|
| 65 |
+
"Extreme": 0.37571516352087697
|
| 66 |
+
},
|
| 67 |
+
"primary_task": {
|
| 68 |
+
"T1. Retrieval & Ranking": 0.7482072090157142,
|
| 69 |
+
"T2. Sequencing & Structure Reconstruction": 0.7523087560587559,
|
| 70 |
+
"T3. Evidence-Grounded QA": 0.5333333333333333,
|
| 71 |
+
"T4. Summarization & Synthesis": 0.5249609995597003,
|
| 72 |
+
"T5. Attribution & Citation Alignment": 0.5307787048666445,
|
| 73 |
+
"T6. Aggregation & Clustering": 0.47337460590728553,
|
| 74 |
+
"T7. Consistency & Compliance Checking": 0.30808530916861365,
|
| 75 |
+
"T8. Structured & Numeric Reasoning": 0.40740740740740744,
|
| 76 |
+
"T9. Version & Code Diff Analysis": 0.6209514621148434,
|
| 77 |
+
"T10. Rule Induction & In-Context Learning": 0.5469444444444443,
|
| 78 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.45
|
| 79 |
+
},
|
| 80 |
+
"language": {
|
| 81 |
+
"Chinese": 0.5151862466463957,
|
| 82 |
+
"English": 0.5233394962525484
|
| 83 |
+
}
|
| 84 |
+
},
|
| 85 |
+
"pass@1": 0.2673333333333333,
|
| 86 |
+
"BoN-2": {
|
| 87 |
+
"overall_metric": 0.5638452937577435,
|
| 88 |
+
"token_length": {
|
| 89 |
+
"8k": 0.6334745299899752,
|
| 90 |
+
"16k": 0.6535009894669588,
|
| 91 |
+
"32k": 0.6109603205298609,
|
| 92 |
+
"64k": 0.5566414838337063,
|
| 93 |
+
"128k": 0.5030500434216845,
|
| 94 |
+
"256k": 0.4254443953042751
|
| 95 |
+
},
|
| 96 |
+
"contextual_requirement": {
|
| 97 |
+
"Full": 0.523183868231744,
|
| 98 |
+
"Partial": 0.6155961989726518
|
| 99 |
+
},
|
| 100 |
+
"difficulty": {
|
| 101 |
+
"Easy": 0.7554808778953406,
|
| 102 |
+
"Moderate": 0.5292531239954449,
|
| 103 |
+
"Hard": 0.5124984336029716,
|
| 104 |
+
"Extreme": 0.41036353942072434
|
| 105 |
+
},
|
| 106 |
+
"primary_task": {
|
| 107 |
+
"T1. Retrieval & Ranking": 0.8018540164669292,
|
| 108 |
+
"T2. Sequencing & Structure Reconstruction": 0.7873358123358121,
|
| 109 |
+
"T3. Evidence-Grounded QA": 0.5666666666666667,
|
| 110 |
+
"T4. Summarization & Synthesis": 0.5413926274234249,
|
| 111 |
+
"T5. Attribution & Citation Alignment": 0.5675265408978069,
|
| 112 |
+
"T6. Aggregation & Clustering": 0.5409163851157314,
|
| 113 |
+
"T7. Consistency & Compliance Checking": 0.34558583778191654,
|
| 114 |
+
"T8. Structured & Numeric Reasoning": 0.4685185185185185,
|
| 115 |
+
"T9. Version & Code Diff Analysis": 0.6508982849457906,
|
| 116 |
+
"T10. Rule Induction & In-Context Learning": 0.6081944444444445,
|
| 117 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.49166666666666664
|
| 118 |
+
},
|
| 119 |
+
"language": {
|
| 120 |
+
"Chinese": 0.5545031574164562,
|
| 121 |
+
"English": 0.5731874300990306
|
| 122 |
+
}
|
| 123 |
+
},
|
| 124 |
+
"pass@2": 0.30533333333333335,
|
| 125 |
+
"BoN-3": {
|
| 126 |
+
"overall_metric": 0.5987860690936202,
|
| 127 |
+
"token_length": {
|
| 128 |
+
"8k": 0.6767030215651172,
|
| 129 |
+
"16k": 0.6801366595488965,
|
| 130 |
+
"32k": 0.6345247903374839,
|
| 131 |
+
"64k": 0.6039272497657204,
|
| 132 |
+
"128k": 0.5233376257525678,
|
| 133 |
+
"256k": 0.47408706759193875
|
| 134 |
+
},
|
| 135 |
+
"contextual_requirement": {
|
| 136 |
+
"Full": 0.5614328850984434,
|
| 137 |
+
"Partial": 0.6463264850874838
|
| 138 |
+
},
|
| 139 |
+
"difficulty": {
|
| 140 |
+
"Easy": 0.7882244354415668,
|
| 141 |
+
"Moderate": 0.5766160107480841,
|
| 142 |
+
"Hard": 0.5655081578600745,
|
| 143 |
+
"Extreme": 0.42768418415872295
|
| 144 |
+
},
|
| 145 |
+
"primary_task": {
|
| 146 |
+
"T1. Retrieval & Ranking": 0.8178727620501064,
|
| 147 |
+
"T2. Sequencing & Structure Reconstruction": 0.8166698116698113,
|
| 148 |
+
"T3. Evidence-Grounded QA": 0.6166666666666667,
|
| 149 |
+
"T4. Summarization & Synthesis": 0.5469270547891242,
|
| 150 |
+
"T5. Attribution & Citation Alignment": 0.581425502230592,
|
| 151 |
+
"T6. Aggregation & Clustering": 0.5699768544212985,
|
| 152 |
+
"T7. Consistency & Compliance Checking": 0.3875679491876082,
|
| 153 |
+
"T8. Structured & Numeric Reasoning": 0.5462962962962963,
|
| 154 |
+
"T9. Version & Code Diff Analysis": 0.6792246386283735,
|
| 155 |
+
"T10. Rule Induction & In-Context Learning": 0.6452777777777778,
|
| 156 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.525
|
| 157 |
+
},
|
| 158 |
+
"language": {
|
| 159 |
+
"Chinese": 0.5867683815613326,
|
| 160 |
+
"English": 0.6108037566259096
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
"pass@3": 0.3433333333333333
|
| 164 |
+
}
|
eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_inference_1-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_inference_2-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_inference_4-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_inference_5-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_inference_7-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_inference_8-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/nonthinking_context-120000_bon-3_summary.json
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"date": "2025-12-08",
|
| 3 |
+
"total_questions_num": 1500,
|
| 4 |
+
"inference_iterations": 3,
|
| 5 |
+
"total_samples_num": 4500,
|
| 6 |
+
"fail_samples_num": 0,
|
| 7 |
+
"inference_inconsistent_samples_num": 0,
|
| 8 |
+
"average_overall_metric": 0.5167049903246114,
|
| 9 |
+
"inference_iteration_1_overall_metric": 0.5175993160365915,
|
| 10 |
+
"inference_iteration_2_overall_metric": 0.5135807596157895,
|
| 11 |
+
"inference_iteration_3_overall_metric": 0.5189348953214519,
|
| 12 |
+
"average_token_length_metric": {
|
| 13 |
+
"8k": 0.5691616296699119,
|
| 14 |
+
"16k": 0.5676134549372556,
|
| 15 |
+
"32k": 0.5289760437098003,
|
| 16 |
+
"64k": 0.5015696259811485,
|
| 17 |
+
"128k": 0.4857001095282947,
|
| 18 |
+
"256k": 0.44720907812125815
|
| 19 |
+
},
|
| 20 |
+
"average_contextual_requirement_metric": {
|
| 21 |
+
"Full": 0.48158748704864085,
|
| 22 |
+
"Partial": 0.5613999944940294
|
| 23 |
+
},
|
| 24 |
+
"average_difficulty_metric": {
|
| 25 |
+
"Easy": 0.6212240545937193,
|
| 26 |
+
"Moderate": 0.5136346834318135,
|
| 27 |
+
"Hard": 0.5163049021668649,
|
| 28 |
+
"Extreme": 0.4044885248516518
|
| 29 |
+
},
|
| 30 |
+
"average_primary_task_metric": {
|
| 31 |
+
"T1. Retrieval & Ranking": 0.7713866456415331,
|
| 32 |
+
"T2. Sequencing & Structure Reconstruction": 0.762687420604087,
|
| 33 |
+
"T3. Evidence-Grounded QA": 0.5333333333333333,
|
| 34 |
+
"T4. Summarization & Synthesis": 0.5515992544844097,
|
| 35 |
+
"T5. Attribution & Citation Alignment": 0.5944535310423185,
|
| 36 |
+
"T6. Aggregation & Clustering": 0.4789878465188747,
|
| 37 |
+
"T7. Consistency & Compliance Checking": 0.39465891182344254,
|
| 38 |
+
"T8. Structured & Numeric Reasoning": 0.2149691358024691,
|
| 39 |
+
"T9. Version & Code Diff Analysis": 0.6451135379199672,
|
| 40 |
+
"T10. Rule Induction & In-Context Learning": 0.49787037037037035,
|
| 41 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.46944444444444444
|
| 42 |
+
},
|
| 43 |
+
"average_language_metric": {
|
| 44 |
+
"Chinese": 0.5273254322066815,
|
| 45 |
+
"English": 0.5060845484425422
|
| 46 |
+
},
|
| 47 |
+
"BoN-1": {
|
| 48 |
+
"overall_metric": 0.5175993160365915,
|
| 49 |
+
"token_length": {
|
| 50 |
+
"8k": 0.5773249164903291,
|
| 51 |
+
"16k": 0.5604627326667504,
|
| 52 |
+
"32k": 0.5374659261246943,
|
| 53 |
+
"64k": 0.5074991434398594,
|
| 54 |
+
"128k": 0.4702901190904467,
|
| 55 |
+
"256k": 0.45255305840747634
|
| 56 |
+
},
|
| 57 |
+
"contextual_requirement": {
|
| 58 |
+
"Full": 0.4801143243240927,
|
| 59 |
+
"Partial": 0.5653074873070485
|
| 60 |
+
},
|
| 61 |
+
"difficulty": {
|
| 62 |
+
"Easy": 0.6321632804118208,
|
| 63 |
+
"Moderate": 0.5036215268809261,
|
| 64 |
+
"Hard": 0.5164886663122836,
|
| 65 |
+
"Extreme": 0.40201006150807705
|
| 66 |
+
},
|
| 67 |
+
"primary_task": {
|
| 68 |
+
"T1. Retrieval & Ranking": 0.7671950408606149,
|
| 69 |
+
"T2. Sequencing & Structure Reconstruction": 0.7640499777999779,
|
| 70 |
+
"T3. Evidence-Grounded QA": 0.5833333333333334,
|
| 71 |
+
"T4. Summarization & Synthesis": 0.5488326711816013,
|
| 72 |
+
"T5. Attribution & Citation Alignment": 0.58422686569879,
|
| 73 |
+
"T6. Aggregation & Clustering": 0.47446486157270457,
|
| 74 |
+
"T7. Consistency & Compliance Checking": 0.3846286380881271,
|
| 75 |
+
"T8. Structured & Numeric Reasoning": 0.2226851851851852,
|
| 76 |
+
"T9. Version & Code Diff Analysis": 0.6587133120918426,
|
| 77 |
+
"T10. Rule Induction & In-Context Learning": 0.5076388888888889,
|
| 78 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.43333333333333335
|
| 79 |
+
},
|
| 80 |
+
"language": {
|
| 81 |
+
"Chinese": 0.5202198683535451,
|
| 82 |
+
"English": 0.5149787637196416
|
| 83 |
+
}
|
| 84 |
+
},
|
| 85 |
+
"pass@1": 0.24533333333333332,
|
| 86 |
+
"BoN-2": {
|
| 87 |
+
"overall_metric": 0.5858993921620037,
|
| 88 |
+
"token_length": {
|
| 89 |
+
"8k": 0.6332517244583726,
|
| 90 |
+
"16k": 0.6310887297252004,
|
| 91 |
+
"32k": 0.6114096243459353,
|
| 92 |
+
"64k": 0.5723573466459502,
|
| 93 |
+
"128k": 0.5523069746695444,
|
| 94 |
+
"256k": 0.5149819531270248
|
| 95 |
+
},
|
| 96 |
+
"contextual_requirement": {
|
| 97 |
+
"Full": 0.5499254609160144,
|
| 98 |
+
"Partial": 0.6316843955659928
|
| 99 |
+
},
|
| 100 |
+
"difficulty": {
|
| 101 |
+
"Easy": 0.6997370485006186,
|
| 102 |
+
"Moderate": 0.578931807315116,
|
| 103 |
+
"Hard": 0.59328992133015,
|
| 104 |
+
"Extreme": 0.46091761656187924
|
| 105 |
+
},
|
| 106 |
+
"primary_task": {
|
| 107 |
+
"T1. Retrieval & Ranking": 0.8076556299673503,
|
| 108 |
+
"T2. Sequencing & Structure Reconstruction": 0.8115853128353127,
|
| 109 |
+
"T3. Evidence-Grounded QA": 0.65,
|
| 110 |
+
"T4. Summarization & Synthesis": 0.5673030178914519,
|
| 111 |
+
"T5. Attribution & Citation Alignment": 0.6729248677257385,
|
| 112 |
+
"T6. Aggregation & Clustering": 0.5463889541830715,
|
| 113 |
+
"T7. Consistency & Compliance Checking": 0.4736347042901523,
|
| 114 |
+
"T8. Structured & Numeric Reasoning": 0.2773148148148148,
|
| 115 |
+
"T9. Version & Code Diff Analysis": 0.7103491970064771,
|
| 116 |
+
"T10. Rule Induction & In-Context Learning": 0.5745833333333332,
|
| 117 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.5833333333333334
|
| 118 |
+
},
|
| 119 |
+
"language": {
|
| 120 |
+
"Chinese": 0.5980713707246905,
|
| 121 |
+
"English": 0.5737274135993191
|
| 122 |
+
}
|
| 123 |
+
},
|
| 124 |
+
"pass@2": 0.312,
|
| 125 |
+
"BoN-3": {
|
| 126 |
+
"overall_metric": 0.6250975643039252,
|
| 127 |
+
"token_length": {
|
| 128 |
+
"8k": 0.6624896013016575,
|
| 129 |
+
"16k": 0.667371637798531,
|
| 130 |
+
"32k": 0.6458484887458542,
|
| 131 |
+
"64k": 0.619893995338142,
|
| 132 |
+
"128k": 0.5995137452171235,
|
| 133 |
+
"256k": 0.5554679174222451
|
| 134 |
+
},
|
| 135 |
+
"contextual_requirement": {
|
| 136 |
+
"Full": 0.5850937206249248,
|
| 137 |
+
"Partial": 0.6760115471681097
|
| 138 |
+
},
|
| 139 |
+
"difficulty": {
|
| 140 |
+
"Easy": 0.7532527253719965,
|
| 141 |
+
"Moderate": 0.6215376875723243,
|
| 142 |
+
"Hard": 0.6270141217985696,
|
| 143 |
+
"Extreme": 0.48578877254181324
|
| 144 |
+
},
|
| 145 |
+
"primary_task": {
|
| 146 |
+
"T1. Retrieval & Ranking": 0.8312391426962816,
|
| 147 |
+
"T2. Sequencing & Structure Reconstruction": 0.8401638639138637,
|
| 148 |
+
"T3. Evidence-Grounded QA": 0.725,
|
| 149 |
+
"T4. Summarization & Synthesis": 0.5737811360033489,
|
| 150 |
+
"T5. Attribution & Citation Alignment": 0.7032234702446885,
|
| 151 |
+
"T6. Aggregation & Clustering": 0.5739851542792719,
|
| 152 |
+
"T7. Consistency & Compliance Checking": 0.5085181572307016,
|
| 153 |
+
"T8. Structured & Numeric Reasoning": 0.33425925925925926,
|
| 154 |
+
"T9. Version & Code Diff Analysis": 0.7396125292314827,
|
| 155 |
+
"T10. Rule Induction & In-Context Learning": 0.6588888888888889,
|
| 156 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.6166666666666667
|
| 157 |
+
},
|
| 158 |
+
"language": {
|
| 159 |
+
"Chinese": 0.6402707582944462,
|
| 160 |
+
"English": 0.6099243703134061
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
"pass@3": 0.3526666666666667
|
| 164 |
+
}
|
eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_1-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_2-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_3-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_4-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_5-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_6-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_7-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_inference_8-of-8.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
eval/output/DeepSeek-V3.2/thinking_context-120000_bon-3_summary.json
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"date": "2025-12-08",
|
| 3 |
+
"total_questions_num": 1500,
|
| 4 |
+
"inference_iterations": 3,
|
| 5 |
+
"total_samples_num": 4500,
|
| 6 |
+
"fail_samples_num": 0,
|
| 7 |
+
"inference_inconsistent_samples_num": 0,
|
| 8 |
+
"average_overall_metric": 0.6782077426413915,
|
| 9 |
+
"inference_iteration_1_overall_metric": 0.671629754030229,
|
| 10 |
+
"inference_iteration_2_overall_metric": 0.6777556491690084,
|
| 11 |
+
"inference_iteration_3_overall_metric": 0.6852378247249357,
|
| 12 |
+
"average_token_length_metric": {
|
| 13 |
+
"8k": 0.755369154280727,
|
| 14 |
+
"16k": 0.7449467265987637,
|
| 15 |
+
"32k": 0.6953336880653428,
|
| 16 |
+
"64k": 0.6946800210314833,
|
| 17 |
+
"128k": 0.6477035080898761,
|
| 18 |
+
"256k": 0.5312133577821595
|
| 19 |
+
},
|
| 20 |
+
"average_contextual_requirement_metric": {
|
| 21 |
+
"Full": 0.6459619783148297,
|
| 22 |
+
"Partial": 0.7192478063297452
|
| 23 |
+
},
|
| 24 |
+
"average_difficulty_metric": {
|
| 25 |
+
"Easy": 0.8502179380964533,
|
| 26 |
+
"Moderate": 0.7507860067400632,
|
| 27 |
+
"Hard": 0.6772551692268365,
|
| 28 |
+
"Extreme": 0.4427333362390087
|
| 29 |
+
},
|
| 30 |
+
"average_primary_task_metric": {
|
| 31 |
+
"T1. Retrieval & Ranking": 0.8628300416379104,
|
| 32 |
+
"T2. Sequencing & Structure Reconstruction": 0.8633894500561163,
|
| 33 |
+
"T3. Evidence-Grounded QA": 0.6277777777777778,
|
| 34 |
+
"T4. Summarization & Synthesis": 0.5645627813985595,
|
| 35 |
+
"T5. Attribution & Citation Alignment": 0.7367830500533472,
|
| 36 |
+
"T6. Aggregation & Clustering": 0.6168551563610202,
|
| 37 |
+
"T7. Consistency & Compliance Checking": 0.5431477714039084,
|
| 38 |
+
"T8. Structured & Numeric Reasoning": 0.6640432098765434,
|
| 39 |
+
"T9. Version & Code Diff Analysis": 0.7821104015574073,
|
| 40 |
+
"T10. Rule Induction & In-Context Learning": 0.6818518518518517,
|
| 41 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.622222222222222
|
| 42 |
+
},
|
| 43 |
+
"average_language_metric": {
|
| 44 |
+
"Chinese": 0.6775197474946019,
|
| 45 |
+
"English": 0.6788957377881832
|
| 46 |
+
},
|
| 47 |
+
"BoN-1": {
|
| 48 |
+
"overall_metric": 0.671629754030229,
|
| 49 |
+
"token_length": {
|
| 50 |
+
"8k": 0.7397920433374541,
|
| 51 |
+
"16k": 0.7269924173975423,
|
| 52 |
+
"32k": 0.7007145536231846,
|
| 53 |
+
"64k": 0.6696695962094932,
|
| 54 |
+
"128k": 0.655131428243527,
|
| 55 |
+
"256k": 0.5374784853701818
|
| 56 |
+
},
|
| 57 |
+
"contextual_requirement": {
|
| 58 |
+
"Full": 0.6384885103680042,
|
| 59 |
+
"Partial": 0.7138095186912471
|
| 60 |
+
},
|
| 61 |
+
"difficulty": {
|
| 62 |
+
"Easy": 0.8524722619644284,
|
| 63 |
+
"Moderate": 0.7391126766592921,
|
| 64 |
+
"Hard": 0.6576844523580323,
|
| 65 |
+
"Extreme": 0.4383605238465565
|
| 66 |
+
},
|
| 67 |
+
"primary_task": {
|
| 68 |
+
"T1. Retrieval & Ranking": 0.8608914983834914,
|
| 69 |
+
"T2. Sequencing & Structure Reconstruction": 0.8649913049913043,
|
| 70 |
+
"T3. Evidence-Grounded QA": 0.6166666666666667,
|
| 71 |
+
"T4. Summarization & Synthesis": 0.5629979913624931,
|
| 72 |
+
"T5. Attribution & Citation Alignment": 0.727192234350903,
|
| 73 |
+
"T6. Aggregation & Clustering": 0.6142105299342737,
|
| 74 |
+
"T7. Consistency & Compliance Checking": 0.5448247044942024,
|
| 75 |
+
"T8. Structured & Numeric Reasoning": 0.6222222222222223,
|
| 76 |
+
"T9. Version & Code Diff Analysis": 0.7868571557580843,
|
| 77 |
+
"T10. Rule Induction & In-Context Learning": 0.7038888888888889,
|
| 78 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.6
|
| 79 |
+
},
|
| 80 |
+
"language": {
|
| 81 |
+
"Chinese": 0.6695591460858414,
|
| 82 |
+
"English": 0.6737003619746216
|
| 83 |
+
}
|
| 84 |
+
},
|
| 85 |
+
"pass@1": 0.442,
|
| 86 |
+
"BoN-2": {
|
| 87 |
+
"overall_metric": 0.739062545668539,
|
| 88 |
+
"token_length": {
|
| 89 |
+
"8k": 0.7985986496818439,
|
| 90 |
+
"16k": 0.8081758798304621,
|
| 91 |
+
"32k": 0.7769565912085228,
|
| 92 |
+
"64k": 0.7312553059908137,
|
| 93 |
+
"128k": 0.7215196949254835,
|
| 94 |
+
"256k": 0.5978691523741125
|
| 95 |
+
},
|
| 96 |
+
"contextual_requirement": {
|
| 97 |
+
"Full": 0.7083303283149,
|
| 98 |
+
"Partial": 0.7781762768459
|
| 99 |
+
},
|
| 100 |
+
"difficulty": {
|
| 101 |
+
"Easy": 0.9085052195225997,
|
| 102 |
+
"Moderate": 0.8289156864861236,
|
| 103 |
+
"Hard": 0.7590532764337315,
|
| 104 |
+
"Extreme": 0.4812983463842697
|
| 105 |
+
},
|
| 106 |
+
"primary_task": {
|
| 107 |
+
"T1. Retrieval & Ranking": 0.8977317620746387,
|
| 108 |
+
"T2. Sequencing & Structure Reconstruction": 0.9110066322566314,
|
| 109 |
+
"T3. Evidence-Grounded QA": 0.725,
|
| 110 |
+
"T4. Summarization & Synthesis": 0.5777725397440469,
|
| 111 |
+
"T5. Attribution & Citation Alignment": 0.807338444836897,
|
| 112 |
+
"T6. Aggregation & Clustering": 0.6747522573464864,
|
| 113 |
+
"T7. Consistency & Compliance Checking": 0.6096717826867489,
|
| 114 |
+
"T8. Structured & Numeric Reasoning": 0.7398148148148148,
|
| 115 |
+
"T9. Version & Code Diff Analysis": 0.8172408263391231,
|
| 116 |
+
"T10. Rule Induction & In-Context Learning": 0.7575,
|
| 117 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.7083333333333334
|
| 118 |
+
},
|
| 119 |
+
"language": {
|
| 120 |
+
"Chinese": 0.7379779669884229,
|
| 121 |
+
"English": 0.7401471243486567
|
| 122 |
+
}
|
| 123 |
+
},
|
| 124 |
+
"pass@2": 0.5286666666666666,
|
| 125 |
+
"BoN-3": {
|
| 126 |
+
"overall_metric": 0.769484517884144,
|
| 127 |
+
"token_length": {
|
| 128 |
+
"8k": 0.8183989834849181,
|
| 129 |
+
"16k": 0.8287125912826261,
|
| 130 |
+
"32k": 0.8000120047613876,
|
| 131 |
+
"64k": 0.7929440904260506,
|
| 132 |
+
"128k": 0.761968193898885,
|
| 133 |
+
"256k": 0.6148712434509996
|
| 134 |
+
},
|
| 135 |
+
"contextual_requirement": {
|
| 136 |
+
"Full": 0.7396412839189731,
|
| 137 |
+
"Partial": 0.8074668156579995
|
| 138 |
+
},
|
| 139 |
+
"difficulty": {
|
| 140 |
+
"Easy": 0.9315740410553816,
|
| 141 |
+
"Moderate": 0.8685767211690967,
|
| 142 |
+
"Hard": 0.8013938214601453,
|
| 143 |
+
"Extreme": 0.5058786414037999
|
| 144 |
+
},
|
| 145 |
+
"primary_task": {
|
| 146 |
+
"T1. Retrieval & Ranking": 0.904701887970005,
|
| 147 |
+
"T2. Sequencing & Structure Reconstruction": 0.9204745254745246,
|
| 148 |
+
"T3. Evidence-Grounded QA": 0.7583333333333333,
|
| 149 |
+
"T4. Summarization & Synthesis": 0.5854934073644178,
|
| 150 |
+
"T5. Attribution & Citation Alignment": 0.8343370156947629,
|
| 151 |
+
"T6. Aggregation & Clustering": 0.6936792642304824,
|
| 152 |
+
"T7. Consistency & Compliance Checking": 0.6489698568119598,
|
| 153 |
+
"T8. Structured & Numeric Reasoning": 0.789814814814815,
|
| 154 |
+
"T9. Version & Code Diff Analysis": 0.8323537332622081,
|
| 155 |
+
"T10. Rule Induction & In-Context Learning": 0.8091666666666666,
|
| 156 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.775
|
| 157 |
+
},
|
| 158 |
+
"language": {
|
| 159 |
+
"Chinese": 0.76992326145198,
|
| 160 |
+
"English": 0.7690457743163093
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
"pass@3": 0.572
|
| 164 |
+
}
|
eval/output/GPT-5/thinking_context-262144_bon-3_summary.json
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"date": "2025-12-08",
|
| 3 |
+
"total_questions_num": 1500,
|
| 4 |
+
"inference_iterations": 3,
|
| 5 |
+
"total_samples_num": 4500,
|
| 6 |
+
"fail_samples_num": 0,
|
| 7 |
+
"inference_inconsistent_samples_num": 0,
|
| 8 |
+
"average_overall_metric": 0.726053089253122,
|
| 9 |
+
"inference_iteration_1_overall_metric": 0.7242860759291603,
|
| 10 |
+
"inference_iteration_2_overall_metric": 0.72436075729001,
|
| 11 |
+
"inference_iteration_3_overall_metric": 0.729512434540192,
|
| 12 |
+
"average_token_length_metric": {
|
| 13 |
+
"8k": 0.7537078410340138,
|
| 14 |
+
"16k": 0.7627066310839429,
|
| 15 |
+
"32k": 0.7434290864816196,
|
| 16 |
+
"64k": 0.7646193918174649,
|
| 17 |
+
"128k": 0.6936202889645278,
|
| 18 |
+
"256k": 0.638235296137159
|
| 19 |
+
},
|
| 20 |
+
"average_contextual_requirement_metric": {
|
| 21 |
+
"Full": 0.6915568234658586,
|
| 22 |
+
"Partial": 0.7699574275278195
|
| 23 |
+
},
|
| 24 |
+
"average_difficulty_metric": {
|
| 25 |
+
"Easy": 0.8523326045847652,
|
| 26 |
+
"Moderate": 0.8231088494697211,
|
| 27 |
+
"Hard": 0.787367547123676,
|
| 28 |
+
"Extreme": 0.4836991814871219
|
| 29 |
+
},
|
| 30 |
+
"average_primary_task_metric": {
|
| 31 |
+
"T1. Retrieval & Ranking": 0.9032376385150938,
|
| 32 |
+
"T2. Sequencing & Structure Reconstruction": 0.9075063054229715,
|
| 33 |
+
"T3. Evidence-Grounded QA": 0.6666666666666666,
|
| 34 |
+
"T4. Summarization & Synthesis": 0.5256066584699448,
|
| 35 |
+
"T5. Attribution & Citation Alignment": 0.8116994715897818,
|
| 36 |
+
"T6. Aggregation & Clustering": 0.6716265654111317,
|
| 37 |
+
"T7. Consistency & Compliance Checking": 0.631179283519898,
|
| 38 |
+
"T8. Structured & Numeric Reasoning": 0.7979938271604939,
|
| 39 |
+
"T9. Version & Code Diff Analysis": 0.818404768269679,
|
| 40 |
+
"T10. Rule Induction & In-Context Learning": 0.6802314814814814,
|
| 41 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.6111111111111112
|
| 42 |
+
},
|
| 43 |
+
"average_language_metric": {
|
| 44 |
+
"Chinese": 0.7196645097291159,
|
| 45 |
+
"English": 0.7324416687771269
|
| 46 |
+
},
|
| 47 |
+
"BoN-1": {
|
| 48 |
+
"overall_metric": 0.7242860759291603,
|
| 49 |
+
"token_length": {
|
| 50 |
+
"8k": 0.7638228227994025,
|
| 51 |
+
"16k": 0.7511485364018967,
|
| 52 |
+
"32k": 0.7397315002658593,
|
| 53 |
+
"64k": 0.7648062624572959,
|
| 54 |
+
"128k": 0.6947065191324134,
|
| 55 |
+
"256k": 0.6315008145180959
|
| 56 |
+
},
|
| 57 |
+
"contextual_requirement": {
|
| 58 |
+
"Full": 0.6845638507619599,
|
| 59 |
+
"Partial": 0.7748416352328712
|
| 60 |
+
},
|
| 61 |
+
"difficulty": {
|
| 62 |
+
"Easy": 0.8419121655420269,
|
| 63 |
+
"Moderate": 0.8140896757444649,
|
| 64 |
+
"Hard": 0.8018107002313927,
|
| 65 |
+
"Extreme": 0.4855278214669571
|
| 66 |
+
},
|
| 67 |
+
"primary_task": {
|
| 68 |
+
"T1. Retrieval & Ranking": 0.9022908711992736,
|
| 69 |
+
"T2. Sequencing & Structure Reconstruction": 0.9003492803492802,
|
| 70 |
+
"T3. Evidence-Grounded QA": 0.6666666666666666,
|
| 71 |
+
"T4. Summarization & Synthesis": 0.525285592483348,
|
| 72 |
+
"T5. Attribution & Citation Alignment": 0.8350389199886978,
|
| 73 |
+
"T6. Aggregation & Clustering": 0.6728116198035761,
|
| 74 |
+
"T7. Consistency & Compliance Checking": 0.6250527729039961,
|
| 75 |
+
"T8. Structured & Numeric Reasoning": 0.7824074074074074,
|
| 76 |
+
"T9. Version & Code Diff Analysis": 0.8228424738103258,
|
| 77 |
+
"T10. Rule Induction & In-Context Learning": 0.6890277777777778,
|
| 78 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.5916666666666667
|
| 79 |
+
},
|
| 80 |
+
"language": {
|
| 81 |
+
"Chinese": 0.7225808137285838,
|
| 82 |
+
"English": 0.725991338129738
|
| 83 |
+
}
|
| 84 |
+
},
|
| 85 |
+
"pass@1": 0.5033333333333333,
|
| 86 |
+
"BoN-2": {
|
| 87 |
+
"overall_metric": 0.773365567880672,
|
| 88 |
+
"token_length": {
|
| 89 |
+
"8k": 0.7988567725267066,
|
| 90 |
+
"16k": 0.7953552672252621,
|
| 91 |
+
"32k": 0.7853032014648265,
|
| 92 |
+
"64k": 0.8171591510524335,
|
| 93 |
+
"128k": 0.7387615265550217,
|
| 94 |
+
"256k": 0.7047574884597809
|
| 95 |
+
},
|
| 96 |
+
"contextual_requirement": {
|
| 97 |
+
"Full": 0.740254405395005,
|
| 98 |
+
"Partial": 0.8155070474078848
|
| 99 |
+
},
|
| 100 |
+
"difficulty": {
|
| 101 |
+
"Easy": 0.8943471956938479,
|
| 102 |
+
"Moderate": 0.8694949682853881,
|
| 103 |
+
"Hard": 0.8603608124174508,
|
| 104 |
+
"Extreme": 0.5205560974396651
|
| 105 |
+
},
|
| 106 |
+
"primary_task": {
|
| 107 |
+
"T1. Retrieval & Ranking": 0.9235081407527015,
|
| 108 |
+
"T2. Sequencing & Structure Reconstruction": 0.9270526695526693,
|
| 109 |
+
"T3. Evidence-Grounded QA": 0.7583333333333333,
|
| 110 |
+
"T4. Summarization & Synthesis": 0.5388141391367185,
|
| 111 |
+
"T5. Attribution & Citation Alignment": 0.8662194687189113,
|
| 112 |
+
"T6. Aggregation & Clustering": 0.724952326567939,
|
| 113 |
+
"T7. Consistency & Compliance Checking": 0.6769275451403334,
|
| 114 |
+
"T8. Structured & Numeric Reasoning": 0.837962962962963,
|
| 115 |
+
"T9. Version & Code Diff Analysis": 0.8518498172294341,
|
| 116 |
+
"T10. Rule Induction & In-Context Learning": 0.749861111111111,
|
| 117 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.6916666666666667
|
| 118 |
+
},
|
| 119 |
+
"language": {
|
| 120 |
+
"Chinese": 0.7653921632804664,
|
| 121 |
+
"English": 0.7813389724808776
|
| 122 |
+
}
|
| 123 |
+
},
|
| 124 |
+
"pass@2": 0.5773333333333334,
|
| 125 |
+
"BoN-3": {
|
| 126 |
+
"overall_metric": 0.7997603117800453,
|
| 127 |
+
"token_length": {
|
| 128 |
+
"8k": 0.8156058789899132,
|
| 129 |
+
"16k": 0.8312258319915683,
|
| 130 |
+
"32k": 0.8146647150412942,
|
| 131 |
+
"64k": 0.8402343004850696,
|
| 132 |
+
"128k": 0.7648319163907665,
|
| 133 |
+
"256k": 0.7319992277816549
|
| 134 |
+
},
|
| 135 |
+
"contextual_requirement": {
|
| 136 |
+
"Full": 0.7681553658992594,
|
| 137 |
+
"Partial": 0.8399847883555894
|
| 138 |
+
},
|
| 139 |
+
"difficulty": {
|
| 140 |
+
"Easy": 0.9168344057692764,
|
| 141 |
+
"Moderate": 0.9117105202934518,
|
| 142 |
+
"Hard": 0.8867394849893248,
|
| 143 |
+
"Extreme": 0.5408505285512573
|
| 144 |
+
},
|
| 145 |
+
"primary_task": {
|
| 146 |
+
"T1. Retrieval & Ranking": 0.9362853987173203,
|
| 147 |
+
"T2. Sequencing & Structure Reconstruction": 0.9359547859547858,
|
| 148 |
+
"T3. Evidence-Grounded QA": 0.7916666666666666,
|
| 149 |
+
"T4. Summarization & Synthesis": 0.5458038576746401,
|
| 150 |
+
"T5. Attribution & Citation Alignment": 0.8823540286034711,
|
| 151 |
+
"T6. Aggregation & Clustering": 0.7446436845926303,
|
| 152 |
+
"T7. Consistency & Compliance Checking": 0.6987021524631377,
|
| 153 |
+
"T8. Structured & Numeric Reasoning": 0.8824074074074073,
|
| 154 |
+
"T9. Version & Code Diff Analysis": 0.8622815151611319,
|
| 155 |
+
"T10. Rule Induction & In-Context Learning": 0.8040277777777778,
|
| 156 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.75
|
| 157 |
+
},
|
| 158 |
+
"language": {
|
| 159 |
+
"Chinese": 0.7871986587353806,
|
| 160 |
+
"English": 0.8123219648247083
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
"pass@3": 0.6106666666666667
|
| 164 |
+
}
|
eval/output/Gemini-3-Pro/thinking_context-1000000_bon-3_summary_1.json
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"date": "2025-12-08",
|
| 3 |
+
"total_questions_num": 1500,
|
| 4 |
+
"inference_iterations": 3,
|
| 5 |
+
"total_samples_num": 4500,
|
| 6 |
+
"fail_samples_num": 0,
|
| 7 |
+
"inference_inconsistent_samples_num": 0,
|
| 8 |
+
"average_overall_metric": 0.7444437692866909,
|
| 9 |
+
"inference_iteration_1_overall_metric": 0.7427789295014662,
|
| 10 |
+
"inference_iteration_2_overall_metric": 0.7447363106713838,
|
| 11 |
+
"inference_iteration_3_overall_metric": 0.7458160676872228,
|
| 12 |
+
"average_token_length_metric": {
|
| 13 |
+
"8k": 0.7745388309264836,
|
| 14 |
+
"16k": 0.7754128692851981,
|
| 15 |
+
"32k": 0.7406998662183158,
|
| 16 |
+
"64k": 0.7360665359651601,
|
| 17 |
+
"128k": 0.7200303191357623,
|
| 18 |
+
"256k": 0.7199141941892306
|
| 19 |
+
},
|
| 20 |
+
"average_contextual_requirement_metric": {
|
| 21 |
+
"Full": 0.7038735679054352,
|
| 22 |
+
"Partial": 0.7960785710446542
|
| 23 |
+
},
|
| 24 |
+
"average_difficulty_metric": {
|
| 25 |
+
"Easy": 0.8521574624226764,
|
| 26 |
+
"Moderate": 0.8232309840478936,
|
| 27 |
+
"Hard": 0.8149532965165824,
|
| 28 |
+
"Extreme": 0.5283874999072132
|
| 29 |
+
},
|
| 30 |
+
"average_primary_task_metric": {
|
| 31 |
+
"T1. Retrieval & Ranking": 0.9179995443398392,
|
| 32 |
+
"T2. Sequencing & Structure Reconstruction": 0.9302470060803394,
|
| 33 |
+
"T3. Evidence-Grounded QA": 0.647222222222222,
|
| 34 |
+
"T4. Summarization & Synthesis": 0.5445275822101924,
|
| 35 |
+
"T5. Attribution & Citation Alignment": 0.8586059345941098,
|
| 36 |
+
"T6. Aggregation & Clustering": 0.7356907117507652,
|
| 37 |
+
"T7. Consistency & Compliance Checking": 0.6533662751050072,
|
| 38 |
+
"T8. Structured & Numeric Reasoning": 0.7958333333333335,
|
| 39 |
+
"T9. Version & Code Diff Analysis": 0.8578963833903199,
|
| 40 |
+
"T10. Rule Induction & In-Context Learning": 0.7022685185185185,
|
| 41 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.5694444444444444
|
| 42 |
+
},
|
| 43 |
+
"average_language_metric": {
|
| 44 |
+
"Chinese": 0.7688522303446449,
|
| 45 |
+
"English": 0.7200353082287382
|
| 46 |
+
},
|
| 47 |
+
"BoN-1": {
|
| 48 |
+
"overall_metric": 0.7427789295014662,
|
| 49 |
+
"token_length": {
|
| 50 |
+
"8k": 0.7732187572229094,
|
| 51 |
+
"16k": 0.7749167759163603,
|
| 52 |
+
"32k": 0.7350093866099319,
|
| 53 |
+
"64k": 0.7325134598856343,
|
| 54 |
+
"128k": 0.7139442003278119,
|
| 55 |
+
"256k": 0.7270709970461549
|
| 56 |
+
},
|
| 57 |
+
"contextual_requirement": {
|
| 58 |
+
"Full": 0.7026463553328858,
|
| 59 |
+
"Partial": 0.7938567511705701
|
| 60 |
+
},
|
| 61 |
+
"difficulty": {
|
| 62 |
+
"Easy": 0.8588887004867032,
|
| 63 |
+
"Moderate": 0.8160008520263468,
|
| 64 |
+
"Hard": 0.8014342850740345,
|
| 65 |
+
"Extreme": 0.5289665590809083
|
| 66 |
+
},
|
| 67 |
+
"primary_task": {
|
| 68 |
+
"T1. Retrieval & Ranking": 0.9275243713204041,
|
| 69 |
+
"T2. Sequencing & Structure Reconstruction": 0.9297345247345244,
|
| 70 |
+
"T3. Evidence-Grounded QA": 0.65,
|
| 71 |
+
"T4. Summarization & Synthesis": 0.5414209883444141,
|
| 72 |
+
"T5. Attribution & Citation Alignment": 0.8545607486756546,
|
| 73 |
+
"T6. Aggregation & Clustering": 0.7302908483081344,
|
| 74 |
+
"T7. Consistency & Compliance Checking": 0.6646114941873353,
|
| 75 |
+
"T8. Structured & Numeric Reasoning": 0.7726851851851853,
|
| 76 |
+
"T9. Version & Code Diff Analysis": 0.8694202497279122,
|
| 77 |
+
"T10. Rule Induction & In-Context Learning": 0.677361111111111,
|
| 78 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.5833333333333334
|
| 79 |
+
},
|
| 80 |
+
"language": {
|
| 81 |
+
"Chinese": 0.7678212367740256,
|
| 82 |
+
"English": 0.7177366222289083
|
| 83 |
+
}
|
| 84 |
+
},
|
| 85 |
+
"pass@1": 0.5553333333333333,
|
| 86 |
+
"BoN-2": {
|
| 87 |
+
"overall_metric": 0.780493395295662,
|
| 88 |
+
"token_length": {
|
| 89 |
+
"8k": 0.8099685348878681,
|
| 90 |
+
"16k": 0.8096316593199784,
|
| 91 |
+
"32k": 0.7687241043788289,
|
| 92 |
+
"64k": 0.7750412199917821,
|
| 93 |
+
"128k": 0.7540270340590343,
|
| 94 |
+
"256k": 0.7655678191364876
|
| 95 |
+
},
|
| 96 |
+
"contextual_requirement": {
|
| 97 |
+
"Full": 0.7433365901494087,
|
| 98 |
+
"Partial": 0.8277838745727145
|
| 99 |
+
},
|
| 100 |
+
"difficulty": {
|
| 101 |
+
"Easy": 0.8834642063049797,
|
| 102 |
+
"Moderate": 0.8652885877995169,
|
| 103 |
+
"Hard": 0.8464691991325216,
|
| 104 |
+
"Extreme": 0.5686644206389443
|
| 105 |
+
},
|
| 106 |
+
"primary_task": {
|
| 107 |
+
"T1. Retrieval & Ranking": 0.943392395293581,
|
| 108 |
+
"T2. Sequencing & Structure Reconstruction": 0.9515230602730601,
|
| 109 |
+
"T3. Evidence-Grounded QA": 0.725,
|
| 110 |
+
"T4. Summarization & Synthesis": 0.5582180210965143,
|
| 111 |
+
"T5. Attribution & Citation Alignment": 0.9116409991735402,
|
| 112 |
+
"T6. Aggregation & Clustering": 0.7710016326218597,
|
| 113 |
+
"T7. Consistency & Compliance Checking": 0.6978423711611208,
|
| 114 |
+
"T8. Structured & Numeric Reasoning": 0.8129629629629629,
|
| 115 |
+
"T9. Version & Code Diff Analysis": 0.8827102930179557,
|
| 116 |
+
"T10. Rule Induction & In-Context Learning": 0.7609722222222222,
|
| 117 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.6
|
| 118 |
+
},
|
| 119 |
+
"language": {
|
| 120 |
+
"Chinese": 0.8041097143355284,
|
| 121 |
+
"English": 0.7568770762557984
|
| 122 |
+
}
|
| 123 |
+
},
|
| 124 |
+
"pass@2": 0.608,
|
| 125 |
+
"BoN-3": {
|
| 126 |
+
"overall_metric": 0.7991334239411454,
|
| 127 |
+
"token_length": {
|
| 128 |
+
"8k": 0.8209355232816534,
|
| 129 |
+
"16k": 0.8144918448337997,
|
| 130 |
+
"32k": 0.7983357420453703,
|
| 131 |
+
"64k": 0.7861483205847641,
|
| 132 |
+
"128k": 0.7922785282211281,
|
| 133 |
+
"256k": 0.7826105846801548
|
| 134 |
+
},
|
| 135 |
+
"contextual_requirement": {
|
| 136 |
+
"Full": 0.768361490879967,
|
| 137 |
+
"Partial": 0.8382977023826439
|
| 138 |
+
},
|
| 139 |
+
"difficulty": {
|
| 140 |
+
"Easy": 0.894964097925158,
|
| 141 |
+
"Moderate": 0.8776700039091074,
|
| 142 |
+
"Hard": 0.8724845393280866,
|
| 143 |
+
"Extreme": 0.5943926766278237
|
| 144 |
+
},
|
| 145 |
+
"primary_task": {
|
| 146 |
+
"T1. Retrieval & Ranking": 0.9476078825332144,
|
| 147 |
+
"T2. Sequencing & Structure Reconstruction": 0.9601147463647463,
|
| 148 |
+
"T3. Evidence-Grounded QA": 0.7416666666666667,
|
| 149 |
+
"T4. Summarization & Synthesis": 0.5634335246423371,
|
| 150 |
+
"T5. Attribution & Citation Alignment": 0.9164461939787348,
|
| 151 |
+
"T6. Aggregation & Clustering": 0.7963203897543422,
|
| 152 |
+
"T7. Consistency & Compliance Checking": 0.7336792229499021,
|
| 153 |
+
"T8. Structured & Numeric Reasoning": 0.8351851851851853,
|
| 154 |
+
"T9. Version & Code Diff Analysis": 0.8886493660222459,
|
| 155 |
+
"T10. Rule Induction & In-Context Learning": 0.7984722222222221,
|
| 156 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.625
|
| 157 |
+
},
|
| 158 |
+
"language": {
|
| 159 |
+
"Chinese": 0.821213954045868,
|
| 160 |
+
"English": 0.7770528938364217
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
"pass@3": 0.6353333333333333
|
| 164 |
+
}
|
eval/output/Qwen3-32B/thinking_context-120000_bon-3_summary.json
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"date": "2025-12-08",
|
| 3 |
+
"total_questions_num": 1500,
|
| 4 |
+
"inference_iterations": 3,
|
| 5 |
+
"total_samples_num": 4500,
|
| 6 |
+
"fail_samples_num": 0,
|
| 7 |
+
"inference_inconsistent_samples_num": 0,
|
| 8 |
+
"average_overall_metric": 0.511230077368557,
|
| 9 |
+
"inference_iteration_1_overall_metric": 0.5125551266359154,
|
| 10 |
+
"inference_iteration_2_overall_metric": 0.5093558836789825,
|
| 11 |
+
"inference_iteration_3_overall_metric": 0.5117792217907733,
|
| 12 |
+
"average_token_length_metric": {
|
| 13 |
+
"8k": 0.6093750682953369,
|
| 14 |
+
"16k": 0.5606336434386148,
|
| 15 |
+
"32k": 0.5407757802194578,
|
| 16 |
+
"64k": 0.49507317974870746,
|
| 17 |
+
"128k": 0.4498486791562606,
|
| 18 |
+
"256k": 0.4116741133529667
|
| 19 |
+
},
|
| 20 |
+
"average_contextual_requirement_metric": {
|
| 21 |
+
"Full": 0.47415262721658286,
|
| 22 |
+
"Partial": 0.5584195593801601
|
| 23 |
+
},
|
| 24 |
+
"average_difficulty_metric": {
|
| 25 |
+
"Easy": 0.7280370376711341,
|
| 26 |
+
"Moderate": 0.46181032202581557,
|
| 27 |
+
"Hard": 0.42241569201251666,
|
| 28 |
+
"Extreme": 0.36452260417788923
|
| 29 |
+
},
|
| 30 |
+
"average_primary_task_metric": {
|
| 31 |
+
"T1. Retrieval & Ranking": 0.770604191490206,
|
| 32 |
+
"T2. Sequencing & Structure Reconstruction": 0.7391521133187797,
|
| 33 |
+
"T3. Evidence-Grounded QA": 0.47222222222222227,
|
| 34 |
+
"T4. Summarization & Synthesis": 0.5173657920719582,
|
| 35 |
+
"T5. Attribution & Citation Alignment": 0.4466370952378391,
|
| 36 |
+
"T6. Aggregation & Clustering": 0.4909460945890284,
|
| 37 |
+
"T7. Consistency & Compliance Checking": 0.2977388131082807,
|
| 38 |
+
"T8. Structured & Numeric Reasoning": 0.5168209876543208,
|
| 39 |
+
"T9. Version & Code Diff Analysis": 0.5534968208496264,
|
| 40 |
+
"T10. Rule Induction & In-Context Learning": 0.49097222222222225,
|
| 41 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.4416666666666667
|
| 42 |
+
},
|
| 43 |
+
"average_language_metric": {
|
| 44 |
+
"Chinese": 0.5000696493273706,
|
| 45 |
+
"English": 0.5223905054097431
|
| 46 |
+
},
|
| 47 |
+
"BoN-1": {
|
| 48 |
+
"overall_metric": 0.5125551266359154,
|
| 49 |
+
"token_length": {
|
| 50 |
+
"8k": 0.6199505611114019,
|
| 51 |
+
"16k": 0.5449906257247921,
|
| 52 |
+
"32k": 0.5573838747951294,
|
| 53 |
+
"64k": 0.48881589783475765,
|
| 54 |
+
"128k": 0.4414701560105687,
|
| 55 |
+
"256k": 0.4227196443388436
|
| 56 |
+
},
|
| 57 |
+
"contextual_requirement": {
|
| 58 |
+
"Full": 0.4840737513137435,
|
| 59 |
+
"Partial": 0.5488041497732257
|
| 60 |
+
},
|
| 61 |
+
"difficulty": {
|
| 62 |
+
"Easy": 0.7237636460264806,
|
| 63 |
+
"Moderate": 0.45634319610108454,
|
| 64 |
+
"Hard": 0.435386696673209,
|
| 65 |
+
"Extreme": 0.368792439903043
|
| 66 |
+
},
|
| 67 |
+
"primary_task": {
|
| 68 |
+
"T1. Retrieval & Ranking": 0.7683888534222428,
|
| 69 |
+
"T2. Sequencing & Structure Reconstruction": 0.7350878750878753,
|
| 70 |
+
"T3. Evidence-Grounded QA": 0.475,
|
| 71 |
+
"T4. Summarization & Synthesis": 0.5164166688813505,
|
| 72 |
+
"T5. Attribution & Citation Alignment": 0.4327235065619549,
|
| 73 |
+
"T6. Aggregation & Clustering": 0.48097372489084406,
|
| 74 |
+
"T7. Consistency & Compliance Checking": 0.28774132062670765,
|
| 75 |
+
"T8. Structured & Numeric Reasoning": 0.5685185185185185,
|
| 76 |
+
"T9. Version & Code Diff Analysis": 0.552360721830303,
|
| 77 |
+
"T10. Rule Induction & In-Context Learning": 0.4877777777777777,
|
| 78 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.43333333333333335
|
| 79 |
+
},
|
| 80 |
+
"language": {
|
| 81 |
+
"Chinese": 0.5036040074009573,
|
| 82 |
+
"English": 0.5215062458708739
|
| 83 |
+
}
|
| 84 |
+
},
|
| 85 |
+
"pass@1": 0.26,
|
| 86 |
+
"BoN-2": {
|
| 87 |
+
"overall_metric": 0.5889072134846955,
|
| 88 |
+
"token_length": {
|
| 89 |
+
"8k": 0.6825707673405994,
|
| 90 |
+
"16k": 0.6560634814652357,
|
| 91 |
+
"32k": 0.6151197445549184,
|
| 92 |
+
"64k": 0.5702241454706968,
|
| 93 |
+
"128k": 0.5232780250539345,
|
| 94 |
+
"256k": 0.4861871170227882
|
| 95 |
+
},
|
| 96 |
+
"contextual_requirement": {
|
| 97 |
+
"Full": 0.5510603442388599,
|
| 98 |
+
"Partial": 0.6370759561612137
|
| 99 |
+
},
|
| 100 |
+
"difficulty": {
|
| 101 |
+
"Easy": 0.8070382684727199,
|
| 102 |
+
"Moderate": 0.5619198253755202,
|
| 103 |
+
"Hard": 0.5090922692146654,
|
| 104 |
+
"Extreme": 0.42010427156051966
|
| 105 |
+
},
|
| 106 |
+
"primary_task": {
|
| 107 |
+
"T1. Retrieval & Ranking": 0.8345700718573785,
|
| 108 |
+
"T2. Sequencing & Structure Reconstruction": 0.7945044307544308,
|
| 109 |
+
"T3. Evidence-Grounded QA": 0.5666666666666667,
|
| 110 |
+
"T4. Summarization & Synthesis": 0.5346021319695609,
|
| 111 |
+
"T5. Attribution & Citation Alignment": 0.5351280430956195,
|
| 112 |
+
"T6. Aggregation & Clustering": 0.5706144095580495,
|
| 113 |
+
"T7. Consistency & Compliance Checking": 0.38333556049784645,
|
| 114 |
+
"T8. Structured & Numeric Reasoning": 0.6296296296296297,
|
| 115 |
+
"T9. Version & Code Diff Analysis": 0.6241105357978589,
|
| 116 |
+
"T10. Rule Induction & In-Context Learning": 0.5713888888888888,
|
| 117 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.525
|
| 118 |
+
},
|
| 119 |
+
"language": {
|
| 120 |
+
"Chinese": 0.5813115070957934,
|
| 121 |
+
"English": 0.5965029198735978
|
| 122 |
+
}
|
| 123 |
+
},
|
| 124 |
+
"pass@2": 0.3393333333333333,
|
| 125 |
+
"BoN-3": {
|
| 126 |
+
"overall_metric": 0.6236268197943012,
|
| 127 |
+
"token_length": {
|
| 128 |
+
"8k": 0.7099773255313729,
|
| 129 |
+
"16k": 0.6915751481616828,
|
| 130 |
+
"32k": 0.6387773010277447,
|
| 131 |
+
"64k": 0.615062805287137,
|
| 132 |
+
"128k": 0.5761902661960858,
|
| 133 |
+
"256k": 0.5101780725617869
|
| 134 |
+
},
|
| 135 |
+
"contextual_requirement": {
|
| 136 |
+
"Full": 0.5835815761216835,
|
| 137 |
+
"Partial": 0.6745934935594524
|
| 138 |
+
},
|
| 139 |
+
"difficulty": {
|
| 140 |
+
"Easy": 0.8441219878850473,
|
| 141 |
+
"Moderate": 0.6121934384709582,
|
| 142 |
+
"Hard": 0.5394052983997921,
|
| 143 |
+
"Extreme": 0.4449122649436655
|
| 144 |
+
},
|
| 145 |
+
"primary_task": {
|
| 146 |
+
"T1. Retrieval & Ranking": 0.8757613870336376,
|
| 147 |
+
"T2. Sequencing & Structure Reconstruction": 0.8133404095904098,
|
| 148 |
+
"T3. Evidence-Grounded QA": 0.6083333333333333,
|
| 149 |
+
"T4. Summarization & Synthesis": 0.5410211462556259,
|
| 150 |
+
"T5. Attribution & Citation Alignment": 0.5882233881768194,
|
| 151 |
+
"T6. Aggregation & Clustering": 0.6075727032146355,
|
| 152 |
+
"T7. Consistency & Compliance Checking": 0.41240156108775533,
|
| 153 |
+
"T8. Structured & Numeric Reasoning": 0.6592592592592592,
|
| 154 |
+
"T9. Version & Code Diff Analysis": 0.658971964363135,
|
| 155 |
+
"T10. Rule Induction & In-Context Learning": 0.6241666666666666,
|
| 156 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.5666666666666667
|
| 157 |
+
},
|
| 158 |
+
"language": {
|
| 159 |
+
"Chinese": 0.6173730284036301,
|
| 160 |
+
"English": 0.6298806111849737
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
"pass@3": 0.37466666666666665
|
| 164 |
+
}
|
eval/output/Qwen3-8B/nonthinking_context-120000_bon-3_summary.json
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"date": "2025-12-08",
|
| 3 |
+
"total_questions_num": 1500,
|
| 4 |
+
"inference_iterations": 3,
|
| 5 |
+
"total_samples_num": 4500,
|
| 6 |
+
"fail_samples_num": 0,
|
| 7 |
+
"inference_inconsistent_samples_num": 0,
|
| 8 |
+
"average_overall_metric": 0.3341263002850569,
|
| 9 |
+
"inference_iteration_1_overall_metric": 0.33209862396824386,
|
| 10 |
+
"inference_iteration_2_overall_metric": 0.33541946267252254,
|
| 11 |
+
"inference_iteration_3_overall_metric": 0.3348608142144041,
|
| 12 |
+
"average_token_length_metric": {
|
| 13 |
+
"8k": 0.3784198847034242,
|
| 14 |
+
"16k": 0.37953608827547447,
|
| 15 |
+
"32k": 0.36499771721065843,
|
| 16 |
+
"64k": 0.28008358934905153,
|
| 17 |
+
"128k": 0.3132115847486681,
|
| 18 |
+
"256k": 0.2885089374230642
|
| 19 |
+
},
|
| 20 |
+
"average_contextual_requirement_metric": {
|
| 21 |
+
"Full": 0.3137072166929107,
|
| 22 |
+
"Partial": 0.3601142248568793
|
| 23 |
+
},
|
| 24 |
+
"average_difficulty_metric": {
|
| 25 |
+
"Easy": 0.428559130222621,
|
| 26 |
+
"Moderate": 0.25195385511657276,
|
| 27 |
+
"Hard": 0.31092306311632556,
|
| 28 |
+
"Extreme": 0.2998920915703961
|
| 29 |
+
},
|
| 30 |
+
"average_primary_task_metric": {
|
| 31 |
+
"T1. Retrieval & Ranking": 0.6412648134912415,
|
| 32 |
+
"T2. Sequencing & Structure Reconstruction": 0.6145308824378115,
|
| 33 |
+
"T3. Evidence-Grounded QA": 0.46388888888888896,
|
| 34 |
+
"T4. Summarization & Synthesis": 0.5163026571908423,
|
| 35 |
+
"T5. Attribution & Citation Alignment": 0.2523030174294647,
|
| 36 |
+
"T6. Aggregation & Clustering": 0.3470095028864795,
|
| 37 |
+
"T7. Consistency & Compliance Checking": 0.1606584732901947,
|
| 38 |
+
"T8. Structured & Numeric Reasoning": 0.07453703703703704,
|
| 39 |
+
"T9. Version & Code Diff Analysis": 0.2941939372673569,
|
| 40 |
+
"T10. Rule Induction & In-Context Learning": 0.30689814814814814,
|
| 41 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.2138888888888889
|
| 42 |
+
},
|
| 43 |
+
"average_language_metric": {
|
| 44 |
+
"Chinese": 0.33785469604432067,
|
| 45 |
+
"English": 0.3303979045257931
|
| 46 |
+
},
|
| 47 |
+
"BoN-1": {
|
| 48 |
+
"overall_metric": 0.33209862396824386,
|
| 49 |
+
"token_length": {
|
| 50 |
+
"8k": 0.37642064292707555,
|
| 51 |
+
"16k": 0.3763334563411937,
|
| 52 |
+
"32k": 0.3747061763092641,
|
| 53 |
+
"64k": 0.26888181342117645,
|
| 54 |
+
"128k": 0.2968232384609332,
|
| 55 |
+
"256k": 0.2994264163498193
|
| 56 |
+
},
|
| 57 |
+
"contextual_requirement": {
|
| 58 |
+
"Full": 0.31079609686291426,
|
| 59 |
+
"Partial": 0.35921093119320874
|
| 60 |
+
},
|
| 61 |
+
"difficulty": {
|
| 62 |
+
"Easy": 0.42792762230121,
|
| 63 |
+
"Moderate": 0.25490326136513564,
|
| 64 |
+
"Hard": 0.30852284836466365,
|
| 65 |
+
"Extreme": 0.2933106279347054
|
| 66 |
+
},
|
| 67 |
+
"primary_task": {
|
| 68 |
+
"T1. Retrieval & Ranking": 0.6344283740437071,
|
| 69 |
+
"T2. Sequencing & Structure Reconstruction": 0.627808712385359,
|
| 70 |
+
"T3. Evidence-Grounded QA": 0.45,
|
| 71 |
+
"T4. Summarization & Synthesis": 0.5165407101035675,
|
| 72 |
+
"T5. Attribution & Citation Alignment": 0.2452312681950123,
|
| 73 |
+
"T6. Aggregation & Clustering": 0.3367099858810609,
|
| 74 |
+
"T7. Consistency & Compliance Checking": 0.15639989872237942,
|
| 75 |
+
"T8. Structured & Numeric Reasoning": 0.08287037037037037,
|
| 76 |
+
"T9. Version & Code Diff Analysis": 0.30783668574801865,
|
| 77 |
+
"T10. Rule Induction & In-Context Learning": 0.29708333333333337,
|
| 78 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.20833333333333334
|
| 79 |
+
},
|
| 80 |
+
"language": {
|
| 81 |
+
"Chinese": 0.3358334164162984,
|
| 82 |
+
"English": 0.3283638315201894
|
| 83 |
+
}
|
| 84 |
+
},
|
| 85 |
+
"pass@1": 0.11333333333333333,
|
| 86 |
+
"BoN-2": {
|
| 87 |
+
"overall_metric": 0.3675196408761814,
|
| 88 |
+
"token_length": {
|
| 89 |
+
"8k": 0.3886475538954706,
|
| 90 |
+
"16k": 0.41257816091326605,
|
| 91 |
+
"32k": 0.41159385509736474,
|
| 92 |
+
"64k": 0.31664455480081405,
|
| 93 |
+
"128k": 0.35128941591866764,
|
| 94 |
+
"256k": 0.32436430463150734
|
| 95 |
+
},
|
| 96 |
+
"contextual_requirement": {
|
| 97 |
+
"Full": 0.34706677159026683,
|
| 98 |
+
"Partial": 0.39355056542189176
|
| 99 |
+
},
|
| 100 |
+
"difficulty": {
|
| 101 |
+
"Easy": 0.4703351630161875,
|
| 102 |
+
"Moderate": 0.29345326870943717,
|
| 103 |
+
"Hard": 0.341763065112941,
|
| 104 |
+
"Extreme": 0.32045543696773376
|
| 105 |
+
},
|
| 106 |
+
"primary_task": {
|
| 107 |
+
"T1. Retrieval & Ranking": 0.6724018408268643,
|
| 108 |
+
"T2. Sequencing & Structure Reconstruction": 0.6527223172989636,
|
| 109 |
+
"T3. Evidence-Grounded QA": 0.5166666666666667,
|
| 110 |
+
"T4. Summarization & Synthesis": 0.5284447990610109,
|
| 111 |
+
"T5. Attribution & Citation Alignment": 0.2918155891981263,
|
| 112 |
+
"T6. Aggregation & Clustering": 0.3951103080046706,
|
| 113 |
+
"T7. Consistency & Compliance Checking": 0.17493867106904193,
|
| 114 |
+
"T8. Structured & Numeric Reasoning": 0.09398148148148147,
|
| 115 |
+
"T9. Version & Code Diff Analysis": 0.33853749595673677,
|
| 116 |
+
"T10. Rule Induction & In-Context Learning": 0.33902777777777776,
|
| 117 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.25833333333333336
|
| 118 |
+
},
|
| 119 |
+
"language": {
|
| 120 |
+
"Chinese": 0.36969778846765494,
|
| 121 |
+
"English": 0.36534149328470883
|
| 122 |
+
}
|
| 123 |
+
},
|
| 124 |
+
"pass@2": 0.13133333333333333,
|
| 125 |
+
"BoN-3": {
|
| 126 |
+
"overall_metric": 0.3871899208655039,
|
| 127 |
+
"token_length": {
|
| 128 |
+
"8k": 0.40912701437278987,
|
| 129 |
+
"16k": 0.4254527731119934,
|
| 130 |
+
"32k": 0.43307631053998147,
|
| 131 |
+
"64k": 0.3370765423947205,
|
| 132 |
+
"128k": 0.3753522812020257,
|
| 133 |
+
"256k": 0.3430546035715128
|
| 134 |
+
},
|
| 135 |
+
"contextual_requirement": {
|
| 136 |
+
"Full": 0.36557204234325574,
|
| 137 |
+
"Partial": 0.4147035844392751
|
| 138 |
+
},
|
| 139 |
+
"difficulty": {
|
| 140 |
+
"Easy": 0.48861463017463347,
|
| 141 |
+
"Moderate": 0.30875721939034645,
|
| 142 |
+
"Hard": 0.3637658481982982,
|
| 143 |
+
"Extreme": 0.3429851432294641
|
| 144 |
+
},
|
| 145 |
+
"primary_task": {
|
| 146 |
+
"T1. Retrieval & Ranking": 0.6857312280559049,
|
| 147 |
+
"T2. Sequencing & Structure Reconstruction": 0.6763734162000631,
|
| 148 |
+
"T3. Evidence-Grounded QA": 0.5416666666666666,
|
| 149 |
+
"T4. Summarization & Synthesis": 0.5345601158557589,
|
| 150 |
+
"T5. Attribution & Citation Alignment": 0.32057216845982056,
|
| 151 |
+
"T6. Aggregation & Clustering": 0.4271227501408007,
|
| 152 |
+
"T7. Consistency & Compliance Checking": 0.19336910203847105,
|
| 153 |
+
"T8. Structured & Numeric Reasoning": 0.11620370370370368,
|
| 154 |
+
"T9. Version & Code Diff Analysis": 0.3618993039783438,
|
| 155 |
+
"T10. Rule Induction & In-Context Learning": 0.3556944444444444,
|
| 156 |
+
"T11. Dialogue Memory & Long-Horizon Tracking": 0.25833333333333336
|
| 157 |
+
},
|
| 158 |
+
"language": {
|
| 159 |
+
"Chinese": 0.3912716913429664,
|
| 160 |
+
"English": 0.38310815038804175
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
"pass@3": 0.14133333333333334
|
| 164 |
+
}
|