Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- VLMEvalKit-sudoku/vlmeval/dataset/charxiv.py +256 -0
- VLMEvalKit-sudoku/vlmeval/dataset/spatial457.py +212 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/ayavision.py +50 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__init__.py +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__pycache__/__init__.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__pycache__/mp_util.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_configs/__pycache__/global_config.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_req.txt +5 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__init__.py +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/color_utils.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/chart_type_and_color.py +971 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/chart_type_evaluator.py +179 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/mp_util.py +76 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/hrbench.py +54 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/logicvista.py +150 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/__init__.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/aggregation_type.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/metric_type.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation/mean_agg.py +22 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation/unsupported_agg.py +8 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/chess_jaccard.py +24 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/constrained_generation.py +518 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/coordinate_sequence_match.py +65 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/dict_jaccard_agg_jaccard.py +28 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/dict_set_equality_agg_jaccard.py +28 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/exact_str_match_case_insensitive.py +12 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/geo_proximity.py +100 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/gleu.py +17 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/jaccard.py +75 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/latex_expr_equality.py +96 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/program_judge.py +141 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/sequence_equality.py +62 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/set_equality.py +74 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/xml_norm_point_in_bbox.py +35 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/analysis_utils.py +182 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/derive_breakdown_results.py +141 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/get_si_subset_from_full.py +92 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/mmdu.py +126 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/mmsci.py +434 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/mmsci4eval_req.txt +6 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/olympiadbench.py +703 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/omni_verifier.py +220 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/qbench_video.py +49 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/qspatial.py +123 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/seephys.py +312 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/__init__.py +0 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/aquarium/filter_prompt.json +118 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/battleships/filter_prompt.json +91 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/binairo/filter_prompt.json +67 -0
- VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/fieldexplore/filter_prompt.json +102 -0
VLMEvalKit-sudoku/vlmeval/dataset/charxiv.py
ADDED
@@ -0,0 +1,256 @@
import os
import json
from typing import Dict, List, Tuple, Any, Union
import pandas as pd
import warnings

from vlmeval.dataset.image_base import ImageBaseDataset
from vlmeval.smp import misc, file
from vlmeval.smp.file import get_intermediate_file_path
from vlmeval import utils
from vlmeval.dataset.utils import build_judge


def auxeval(judge_model: Any, line: pd.Series, **kwargs: Any) -> Dict[str, Any]:
    """
    Evaluate a line using the judge model.

    Args:
        judge_model: The model used for evaluation
        line: A pandas Series containing the data to evaluate
        **kwargs: Additional arguments for the judge model

    Returns:
        Dict containing evaluation results with extract_answer and score
    """
    failure_result = {"extract_answer": "Failed to parse response", "score": 0.0}
    prompt = line["grading_query"].replace("{PREDICTION}", line["prediction"])

    retry = kwargs.get("retry", 10)
    max_tokens = kwargs.get("max_tokens", 256)
    temperature = kwargs.get("temperature", 0)
    seed = kwargs.get("seed", 42)
    top_p = kwargs.get("top_p", 1)

    for _ in range(retry):
        try:
            response = judge_model.generate(
                prompt,
                temperature=temperature,
                max_tokens=max_tokens,
                seed=seed,
                top_p=top_p,
            )
            content = json.loads(response)
            if not isinstance(content, dict):
                return failure_result
            if "score" not in content or "extract_answer" not in content:
                return failure_result
            return content
        except Exception:
            continue

    return failure_result


def qid2category(mode: str) -> Tuple[Dict[int, str], str]:
    """
    Map question IDs to their categories based on the evaluation mode.

    Args:
        mode: Either "descriptive" or "reasoning"

    Returns:
        Tuple containing a mapping dictionary and the index column name

    Raises:
        ValueError: If the mode is not recognized
    """
    if mode == "descriptive":
        index_col = "qid"
        return {
            1: "Information Extraction",
            2: "Information Extraction",
            3: "Information Extraction",
            4: "Information Extraction",
            5: "Information Extraction",
            6: "Information Extraction",
            7: "Information Extraction",
            8: "Enumeration",
            9: "Enumeration",
            10: "Counting",
            11: "Pattern Recognition",
            12: "Counting",
            13: "Enumeration",
            14: "Enumeration",
            15: "Enumeration",
            16: "Pattern Recognition",
            17: "Compositionality",
            18: "Pattern Recognition",
            19: "Counting",
        }, index_col
    elif mode == "reasoning":
        index_col = "inst_category"
        return {
            1: "Text-in-Chart",
            2: "Text-in-General",
            3: "Number-in-Chart",
            4: "Number-in-General",
        }, index_col
    else:
        raise ValueError(f"Invalid mode: {mode}")


class CharXiv(ImageBaseDataset):
    TYPE = "VQA"
    DATASET_URL = {
        "CharXiv_descriptive_val": "http://opencompass.openxlab.space/utils/VLMEval/CharXiv_descriptive_val.tsv",
        "CharXiv_reasoning_val": "http://opencompass.openxlab.space/utils/VLMEval/CharXiv_reasoning_val.tsv",
    }
    DATASET_MD5 = {
        "CharXiv_descriptive_val": "e165037032f169a59dd09ea5d7ad3073",
        "CharXiv_reasoning_val": "98eeff269b40726982627b19338ccd45",
    }

    def build_prompt(self, line: Union[int, pd.Series]) -> List[Dict[str, str]]:
        """
        Build a prompt for the model from a data line.

        Args:
            line: Either an index into the dataset or a pandas Series

        Returns:
            List of message dictionaries containing the image and question
        """
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = misc.toliststr(line["image"])
        else:
            tgt_path = self.dump_image(line)

        messages = [{"type": "image", "value": tgt_path[0]}]
        messages.append({"type": "text", "value": line["question"]})
        return messages

    def get_scores(self, result_file: str) -> pd.DataFrame:
        """
        Calculate scores by category from evaluation results.

        Args:
            result_file: Path to the file containing evaluation results

        Returns:
            DataFrame with scores for each category and overall score

        Raises:
            ValueError: If the dataset name is invalid
        """
        data = file.load(result_file)

        if "descriptive" in self.dataset_name:
            mode = "descriptive"
        elif "reasoning" in self.dataset_name:
            mode = "reasoning"
        else:
            raise ValueError(f"Invalid dataset name: {self.dataset_name}")

        category_map, index_col = qid2category(mode)

        # Group scores by category
        scores_by_category = {}
        for _, row in data.iterrows():
            category = category_map[row[index_col]]
            if category not in scores_by_category:
                scores_by_category[category] = []
            scores_by_category[category].append(row["score"])

        # Calculate average score for each category
        result = {}
        for category, scores in scores_by_category.items():
            result[category] = [sum(scores) / len(scores)]

        # Calculate overall score
        result["Overall"] = [
            sum(sum(scores) for scores in scores_by_category.values()) / len(data)
        ]

        return pd.DataFrame(result)

    def evaluate(self, eval_file: str, **judge_kwargs: Any) -> pd.DataFrame:
        """
        Evaluate model predictions on the CharXiv dataset.

        Args:
            eval_file: Path to the file containing model predictions
            **judge_kwargs: Additional arguments for the judge model

        Returns:
            DataFrame with evaluation scores by category
        """
        # Set up judge model
        if "LOCAL_LLM" in os.environ:
            judge_model = os.path.basename(os.environ.get("LOCAL_LLM"))
        else:
            judge_model = judge_kwargs.get("model", "gpt-4o-mini")

        if judge_model != "gpt-4o-mini":
            warnings.warn(
                f"The judge_model '{judge_model}' is not gpt-4o-mini. Evaluation results may not be accurate."
            )

        judge_model = build_judge(model=judge_model, **judge_kwargs)
        judge_model_name = judge_model.model

        # Define file paths
        result_file = get_intermediate_file_path(eval_file, f"_{judge_model_name}")
        temp_result_file = get_intermediate_file_path(eval_file, f"_{judge_model_name}", "pkl")
        score_file = get_intermediate_file_path(result_file, "_acc", "csv")

        # Return existing results if available
        if os.path.exists(result_file):
            score = self.get_scores(result_file)
            file.dump(score, score_file)
            return score

        data = file.load(eval_file)
        if "score" not in data.columns:
            data["score"] = 0
        if "extract_answer" not in data.columns:
            data["extract_answer"] = ""

        # Load intermediate results if available
        processed_results = {}
        if os.path.exists(temp_result_file):
            processed_results = file.load(temp_result_file)

        # Identify unprocessed indices
        indices = [i for i in range(len(data)) if i not in processed_results]
        tups = [(judge_model, data.iloc[i]) for i in range(len(data)) if i not in processed_results]

        # Process remaining examples
        nproc = judge_kwargs.pop("nproc", 4)
        if len(indices):
            utils.track_progress_rich(
                auxeval,
                tups,
                nproc=nproc,
                chunksize=nproc,
                keys=indices,
                save=temp_result_file,
                **judge_kwargs,
            )
            processed_results = file.load(temp_result_file)

        # Update data with evaluation results
        data["score"] = data.apply(lambda x: processed_results[x.name]["score"], axis=1)
        data["extract_answer"] = data.apply(
            lambda x: processed_results[x.name]["extract_answer"], axis=1
        )

        # Save results and return scores
        file.dump(data, result_file)
        score = self.get_scores(result_file)
        file.dump(score, score_file)
        return score
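
Note: for orientation, a minimal sketch of how auxeval is driven (the stub judge and sample row below are hypothetical, not part of the diff). The judge is expected to return a JSON object carrying both "extract_answer" and "score"; anything else falls through to the failure result after the retries are exhausted.

import json
import pandas as pd

from vlmeval.dataset.charxiv import auxeval  # module added in this diff

# Hypothetical stand-in for the judge built by build_judge(); only the
# generate() signature that auxeval calls matters here.
class StubJudge:
    def generate(self, prompt, **kwargs):
        return json.dumps({"extract_answer": "42", "score": 1.0})

row = pd.Series({
    "grading_query": "Grade this answer: {PREDICTION}",  # made-up template
    "prediction": "The value is 42.",
})
print(auxeval(StubJudge(), row))
# -> {'extract_answer': '42', 'score': 1.0}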
VLMEvalKit-sudoku/vlmeval/dataset/spatial457.py
ADDED
@@ -0,0 +1,212 @@
import os
import re
import tempfile
from functools import partial

import pandas as pd

from .image_base import ImageBaseDataset
from .utils import build_judge, DEBUG_MESSAGE, Spatial457_utils
from ..smp import *
from ..utils import track_progress_rich


class Spatial457(ImageBaseDataset):
    TYPE = "VQA"
    # When ROBUST is True, if the model does not follow the format, the entire response is treated as the answer.
    ROBUST = True

    DATASET_URL = {
        "Spatial457": "http://opencompass.openxlab.space/utils/VLMEval/Spatial457.tsv",
    }

    DATASET_MD5 = {
        'Spatial457': "1f24f5a7b2cadc3d33a8a66ecf92ca68"
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.dataset_utils = Spatial457_utils()

    def evaluate(self, eval_file, **judge_kwargs):

        data = load(eval_file)
        data['prediction'] = [str(x) for x in data['prediction']]
        lt = len(data)
        lines = [data.iloc[i] for i in range(lt)]

        all_results = {
            "correct": 0,
            "total": 0,
            "answers": [],
            "format_error": 0,
            "L1_single": 0,
            "L2_objects": 0,
            "L3_2d_spatial": 0,
            "L4_occ": 0,
            "L4_pose": 0,
            "L5_6d_spatial": 0,
            "L5_collision": 0,
            "L1_single_correct": 0,
            "L2_objects_correct": 0,
            "L3_2d_spatial_correct": 0,
            "L4_occ_correct": 0,
            "L4_pose_correct": 0,
            "L5_6d_spatial_correct": 0,
            "L5_collision_correct": 0,
        }

        for i in tqdm(range(len(lines))):

            line = lines[i]
            index = int(line["index"])

            answers = str(line["answer"])
            level = line["category"]
            objects = []

            # parse the answer
            pred_try_1 = re.search(r"Answer': '(.*?)'", line["prediction"])
            pred_try_2 = re.search(r'Answer": "(.*?)"', line["prediction"])
            pred_try_3 = re.search(r"Answer': (\d)", line["prediction"])

            if pred_try_1:
                pred = pred_try_1.group(1)
            elif pred_try_2:
                pred = pred_try_2.group(1)
            elif pred_try_3:
                pred = pred_try_3.group(1)
            else:
                if self.ROBUST:
                    pred = line['prediction']
                else:
                    pred = self.dataset_utils.get_random_answer(answers)
                all_results["format_error"] += 1

            reasoning_try_1 = re.search(r"Reasoning': '(.*?)'", line["prediction"])
            reasoning_try_2 = re.search(r'Reasoning": "(.*?)"', line["prediction"])

            if reasoning_try_1:
                reasoning = reasoning_try_1.group(1)
            elif reasoning_try_2:
                reasoning = reasoning_try_2.group(1)
            else:
                if self.ROBUST:
                    reasoning = "Format Error. The entire response is used as the answer."
                else:
                    reasoning = "Format Error. Guess a random answer."

            correct = self.dataset_utils.is_correct(answers, pred)

            all_results["answers"].append(
                {
                    "index": index,
                    "correct": correct,
                    "answers": answers,
                    "predict": pred,
                    "reasoning": reasoning,
                    "objects": objects,
                }
            )

            all_results["total"] += 1
            if correct:
                all_results["correct"] += 1

            all_results[f"{level}"] += 1
            if correct:
                all_results[f"{level}_correct"] += 1

        all_results["score"] = all_results["correct"] / all_results["total"]

        for level in [
            "L1_single",
            "L2_objects",
            "L3_2d_spatial",
            "L4_occ",
            "L4_pose",
            "L5_6d_spatial",
            "L5_collision",
        ]:
            all_results[f"{level}_score"] = (
                all_results[f"{level}_correct"] / all_results[level] if all_results[level] > 0 else 0
            )

        score_pth = get_intermediate_file_path(eval_file, "_score", "json")

        dump(all_results, score_pth)
        return all_results

    def build_prompt(self, line):
        msgs = super().build_prompt(line)

        set_type = line["category"]

        instruction_1, instruction_2 = self.build_subtask_instruction(set_type)

        msgs.insert(0, {"type": "text", "value": instruction_1})
        msgs.append({"type": "text", "value": instruction_2})

        return msgs

    def build_subtask_instruction(self, level):

        task_map = {
            "L1_single": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of the objects, "
                "and then determine the answer to the question.\n"
            ),
            "L2_objects": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of multiple objects, "
                "and then determine the answer to the question.\n"
            ),
            "L3_2d_spatial": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of multiple objects and their spatial relationship from 2D "
                "projected camera view, and then determine the answer to the question.\n"
            ),
            "L4_occ": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of multiple objects and their occlusion relationships, and "
                "then determine the answer to the question.\n"
            ),
            "L4_pose": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of multiple objects and their facing direction in 3D space "
                "from the camera view, and then determine the answer to the question.\n"
            ),
            "L5_6d_spatial": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of multiple objects and their spatial relationship from "
                "objects’ perspective in 3D space, and then determine the answer to the question.\n"
            ),
            "L5_collision": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of multiple objects and their potential collision given the "
                "assumption of moving direction in 3D space, and then determine the answer to the question.\n"
            ),
        }

        instruction_1 = task_map.get(level, "")

        instruction_2 = (
            "First, you should identify the related objects referred to in the questions, including their shape, "
            "color, size; then add a brief reasoning process about the questions. Each object in the image has a "
            "shape (e.g., 'airliner'), a size (only can be 'small' or 'large'), a color (e.g. 'blue'). The size of "
            "the object is either 'small' or 'large'. The color of the object is one of the following: 'gray', "
            "'blue', 'purple', 'brown', 'green', 'cyan', 'red', 'yellow'. The direction of the object is one of the "
            "following: 'left', 'right', 'front', 'back'.\n\n"
            "Second, give the answer based on the reasoning process. The answer should only be (1) a phrase chosen "
            "from the following options: {}, or (2) an integer [0-10] when asked for 'How many' or 'What is the "
            "number of', or (3) 'Yes' or 'No' when asked for 'Is there'. If you think there are no possible answers "
            "or the question is not clear, choose the best answer that fits the question.\n\n"
        ).format(self.dataset_utils.all_answers())

        instruction_2 += (
            "Write your response into this json template: {'Reasoning': '<your reasons>', 'Answer': '<Your answer>'}"
        )

        return instruction_1, instruction_2
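
Note: a quick illustration of the three-pattern answer extraction used above (the sample responses are made up). Single-quoted dict-style output, double-quoted JSON output, and bare integer answers are each caught by one of the patterns; anything else falls back to the whole response when ROBUST is set.

import re

samples = [
    "{'Reasoning': 'The large red airliner faces left.', 'Answer': 'left'}",
    '{"Reasoning": "Two small cars are visible.", "Answer": "2"}',
    "{'Reasoning': 'Counting gives three.', 'Answer': 3}",
]
for text in samples:
    m = (re.search(r"Answer': '(.*?)'", text)
         or re.search(r'Answer": "(.*?)"', text)
         or re.search(r"Answer': (\d)", text))
    print(m.group(1) if m else text)
# -> left, 2, 3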
VLMEvalKit-sudoku/vlmeval/dataset/utils/ayavision.py
ADDED
@@ -0,0 +1,50 @@
import pandas as pd
from ...smp import *


FAIL_MSG = "Failed to obtain answer via API."


def build_prompt_ayavision(line):
    question = line["question"]
    prediction = str(line["prediction"])
    answer = str(line["answer"])

    tmpl = (
        "You are an expert evaluator. Your task is to determine if the predicted answer "
        "is a correct response to the given question, using the ground truth answer as a reference. "
        "The predicted answer does not need to be a verbatim match of the ground truth, "
        "but it must be semantically equivalent and accurately answer the question.\n"
        "Respond with '[[CORRECT]]' if the prediction is correct, and '[[WRONG]]' if it is incorrect. "
        "Do not provide any explanation.\n\n"
        "Question: {question}\n"
        "Ground Truth Answer: {answer}\n"
        "Predicted Answer: {prediction}\n\n"
        "Is the prediction correct? "
    )
    return tmpl.format(question=question, answer=answer, prediction=prediction)


def AyaVision_auxeval(model, line):
    prompt = build_prompt_ayavision(line)
    log = ""
    retry = 5

    for i in range(retry):
        res = model.generate(prompt, temperature=i * 0.5)

        if FAIL_MSG in res:
            log += f"Try {i}: output is {res}, failed to parse.\\n"
        elif "[[CORRECT]]" in res:
            log += "Succeed"
            hit = 1
            return dict(log=log, res=res, hit=hit)
        elif "[[WRONG]]" in res:
            log += "Succeed"
            hit = 0
            return dict(log=log, res=res, hit=hit)
        else:
            log += f"Try {i}: output is {res}, failed to parse.\\n"

    log += "All 5 retries failed.\\n"
    return dict(log=log, res="", hit=0)
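
Note: a minimal sketch of the retry loop in use (the stub model is hypothetical, and the helpers above are assumed importable). The judge is re-queried at increasing temperature until one of the [[CORRECT]]/[[WRONG]] verdict markers appears in its output.

# Hypothetical stand-in for the judge model passed to AyaVision_auxeval.
class StubModel:
    def generate(self, prompt, temperature=0):
        return "[[CORRECT]]"

line = {"question": "What color is the sky?",
        "answer": "blue",
        "prediction": "It is blue."}
print(AyaVision_auxeval(StubModel(), line))
# -> {'log': 'Succeed', 'res': '[[CORRECT]]', 'hit': 1}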
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__init__.py
ADDED
File without changes
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (165 Bytes)
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__pycache__/mp_util.cpython-310.pyc
ADDED
Binary file (1.81 kB)
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_configs/__pycache__/global_config.cpython-310.pyc
ADDED
Binary file (1.97 kB)
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_req.txt
ADDED
@@ -0,0 +1,5 @@
pdf2image
colormath
squarify
matplotlib_venn
Pillow
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__init__.py
ADDED
File without changes
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/color_utils.cpython-310.pyc
ADDED
Binary file (2.17 kB)
VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/chart_type_and_color.py
ADDED
@@ -0,0 +1,971 @@
# flake8: noqa
import inspect
from matplotlib.patches import Ellipse
from matplotlib.image import NonUniformImage
from matplotlib.projections.polar import PolarAxes
import networkx.drawing.nx_pylab as nx_pylab
import matplotlib.colors as mcolors
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.axes._axes import Axes
from matplotlib.axes._base import _process_plot_var_args
import matplotlib.pyplot as plt
import matplotlib
import squarify

import warnings
warnings.filterwarnings("ignore", category=UserWarning)

# sys.path.insert(0, f'{os.environ["PROJECT_PATH"]}')


drawed_colors = []
in_decorator = False


def convert_color_to_hex(color):
    """Convert color from name, RGBA, or hex to a hex format."""
    try:
        # First, try to convert from color name to RGBA to hex
        if isinstance(color, str):
            # Check if it's already a hex color (starts with '#' and length either 7 or 9)
            if color.startswith('#') and (len(color) == 7 or len(color) == 9):
                return color.upper()
            else:
                return mcolors.to_hex(mcolors.to_rgba(color)).upper()
        # Then, check if it's in RGBA format
        elif isinstance(color, (list, tuple)) and len(color) == 4:
            return mcolors.to_hex(color).upper()
        else:
            raise ValueError("Unsupported color format")
    except ValueError as e:
        print(color)
        print("Error converting color:", e)
        return None


def log_function_specific_for_draw_networkx_labels(func):
    def wrapper(
        G,
        pos,
        labels=None,
        font_size=12,
        font_color="k",
        font_family="sans-serif",
        font_weight="normal",
        alpha=None,
        bbox=None,
        horizontalalignment="center",
        verticalalignment="center",
        ax=None,
        clip_on=True,
    ):
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            func_name = inspect.getfile(func) + "/" + func.__name__

            result = func(
                G,
                pos,
                labels=labels,
                font_size=font_size,
                font_color=font_color,
                font_family=font_family,
                font_weight=font_weight,
                alpha=alpha,
                bbox=bbox,
                horizontalalignment=horizontalalignment,
                verticalalignment=verticalalignment,
                ax=ax,
                clip_on=clip_on
            )

            for item in result.values():
                color = convert_color_to_hex(item.get_color())
                drawed_colors.append(func_name + "--" + color)

            in_decorator = False
        else:
            return func(
                G,
                pos,
                labels=labels,
                font_size=font_size,
                font_color=font_color,
                font_family=font_family,
                font_weight=font_weight,
                alpha=alpha,
                bbox=bbox,
                horizontalalignment=horizontalalignment,
                verticalalignment=verticalalignment,
                ax=ax,
                clip_on=clip_on
            )
        return result
    wrapper.__name__ = func.__name__
    return wrapper


def log_function_specific_for_draw_networkx_edges(func):
    def wrapper(
        G,
        pos,
        edgelist=None,
        width=1.0,
        edge_color="k",
        style="solid",
        alpha=None,
        arrowstyle=None,
        arrowsize=10,
        edge_cmap=None,
        edge_vmin=None,
        edge_vmax=None,
        ax=None,
        arrows=None,
        label=None,
        node_size=300,
        nodelist=None,
        node_shape="o",
        connectionstyle="arc3",
        min_source_margin=0,
        min_target_margin=0,
    ):
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            func_name = inspect.getfile(func) + "/" + func.__name__

            result = func(
                G,
                pos,
                edgelist=edgelist,
                width=width,
                edge_color=edge_color,
                style=style,
                alpha=alpha,
                arrowstyle=arrowstyle,
                arrowsize=arrowsize,
                edge_cmap=edge_cmap,
                edge_vmin=edge_vmin,
                edge_vmax=edge_vmax,
                ax=ax,
                arrows=arrows,
                label=label,
                node_size=node_size,
                nodelist=nodelist,
                node_shape=node_shape,
                connectionstyle=connectionstyle,
                min_source_margin=min_source_margin,
                min_target_margin=min_target_margin
            )

            for item in result.get_edgecolors().tolist():
                color = convert_color_to_hex(item)
                drawed_colors.append(func_name + "--" + color)

            in_decorator = False
        else:
            return func(
                G,
                pos,
                edgelist=edgelist,
                width=width,
                edge_color=edge_color,
                style=style,
                alpha=alpha,
                arrowstyle=arrowstyle,
                arrowsize=arrowsize,
                edge_cmap=edge_cmap,
                edge_vmin=edge_vmin,
                edge_vmax=edge_vmax,
                ax=ax,
                arrows=arrows,
                label=label,
                node_size=node_size,
                nodelist=nodelist,
                node_shape=node_shape,
                connectionstyle=connectionstyle,
                min_source_margin=min_source_margin,
                min_target_margin=min_target_margin
            )
        return result
    wrapper.__name__ = func.__name__
    return wrapper


def log_function_specific_for_draw_networkx_nodes(func):
    def wrapper(
        G,
        pos,
        nodelist=None,
        node_size=300,
        node_color="#1f78b4",
        node_shape="o",
        alpha=None,
        cmap=None,
        vmin=None,
        vmax=None,
        ax=None,
        linewidths=None,
        edgecolors=None,
        label=None,
        margins=None,
    ):
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            func_name = inspect.getfile(func) + "/" + func.__name__

            result = func(
                G,
                pos,
                nodelist=nodelist,
                node_size=node_size,
                node_color=node_color,
                node_shape=node_shape,
                alpha=alpha,
                cmap=cmap,
                vmin=vmin,
                vmax=vmax,
                ax=ax,
                linewidths=linewidths,
                edgecolors=edgecolors,
                label=label,
                margins=margins
            )

            for item in result.get_facecolors().tolist():
                color = convert_color_to_hex(item)
                drawed_colors.append(func_name + "--" + color)

            in_decorator = False
        else:
            return func(
                G,
                pos,
                nodelist=nodelist,
                node_size=node_size,
                node_color=node_color,
                node_shape=node_shape,
                alpha=alpha,
                cmap=cmap,
                vmin=vmin,
                vmax=vmax,
                ax=ax,
                linewidths=linewidths,
                edgecolors=edgecolors,
                label=label,
                margins=margins
            )
        return result
    wrapper.__name__ = func.__name__
    return wrapper


def log_function_for_3d(func):
    def wrapper(*args, **kwargs):
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            func_name = inspect.getfile(func) + "/" + func.__name__

            result = func(*args, **kwargs)

            if func.__name__ == "scatter":
                # check whether cmap is used
                if "cmap" in kwargs and kwargs["cmap"] is not None:
                    print("cmap is used", kwargs["cmap"])
                    drawed_colors.append(func_name + "--" + kwargs["cmap"])
                else:
                    for item in result.get_facecolors().tolist():
                        color = convert_color_to_hex(item)
                        drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "plot":
                for line in result:
                    color = convert_color_to_hex(line.get_color())
                    drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "plot_surface":
                if "cmap" in kwargs and kwargs["cmap"] is not None:
                    print("cmap is used", kwargs["cmap"])
                    drawed_colors.append(func_name + "--" + kwargs["cmap"])
                else:
                    colors = result.get_facecolors().tolist()
                    drawed_colors.append(func_name + "--" + convert_color_to_hex(colors[0]))
            elif func.__name__ == "bar3d":
                colors = result.get_facecolors().tolist()
                drawed_colors.append(func_name + "--" + convert_color_to_hex(colors[0]))
            elif func.__name__ == "bar":
                for item in result:
                    color = convert_color_to_hex(item.get_facecolor())
                    drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "add_collection3d":
                colors = result.get_facecolors().tolist()
                for color in colors:
                    drawed_colors.append(func_name + "--" + convert_color_to_hex(color))

            in_decorator = False
        else:
            return func(*args, **kwargs)
        return result

    wrapper.__name__ = func.__name__
    return wrapper


def log_function(func):
    def wrapper(*args, **kwargs):
        global drawed_colors
        global in_decorator

        if not in_decorator:
            in_decorator = True

            func_name = inspect.getfile(func) + "/" + func.__name__

            result = func(*args, **kwargs)

            if func.__name__ == "_makeline":
                color = convert_color_to_hex(result[1]["color"])
                drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "axhline":
                color = convert_color_to_hex(result.get_color())
                drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "axvline":
                color = convert_color_to_hex(result.get_color())
                drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "_fill_between_x_or_y":
                color = convert_color_to_hex(list(result.get_facecolors()[0]))
                drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "bar":
                for item in result:
                    color = convert_color_to_hex(list(item._original_facecolor))
                    drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "scatter" and not isinstance(args[0], PolarAxes):
                # check whether cmap is used
                if "cmap" in kwargs and kwargs["cmap"] is not None:
                    print("cmap is used", kwargs["cmap"])
                    drawed_colors.append(func_name + "--" + kwargs["cmap"])
                else:
                    color = convert_color_to_hex(list(result.get_facecolor()[0]))
                    drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "pie":
                for item in result[0]:
                    color = convert_color_to_hex(item.get_facecolor())
                    drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "axvspan":
                color = convert_color_to_hex(result.get_facecolor())
                drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "axhspan":
                color = convert_color_to_hex(result.get_facecolor())
                drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "hlines":
                for item in result.get_edgecolors():
                    color = convert_color_to_hex(list(item))
                    drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "vlines":
                for item in result.get_edgecolors():
                    color = convert_color_to_hex(list(item))
                    drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "boxplot":
                for item in result["boxes"]:
                    if isinstance(item, matplotlib.patches.PathPatch):
                        color = convert_color_to_hex(list(item.get_facecolor()))
                        drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "violinplot":
                for item in result["bodies"]:
                    color = convert_color_to_hex(list(item.get_facecolor()[0]))
                    drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "hist":
                tops, bins, patches = result
                if not isinstance(patches, matplotlib.cbook.silent_list):
                    for item in patches:
                        color = convert_color_to_hex(list(item.get_facecolor()))
                        drawed_colors.append(func_name + "--" + color)
                else:
                    for container in patches:
                        for item in container:
                            color = convert_color_to_hex(list(item.get_facecolor()))
                            drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "quiver":
                for item in result.get_facecolors().tolist():
                    color = convert_color_to_hex(item)
                    drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "plot" and len(args) > 0 and isinstance(args[0], PolarAxes):
                lines = result
                for line in lines:
                    color = convert_color_to_hex(line.get_color())
                    drawed_colors.append(func_name + "_polar" + "--" + color)
            elif func.__name__ == "scatter" and isinstance(args[0], PolarAxes):
                # check whether cmap is used
                if "cmap" in kwargs and kwargs["cmap"] is not None:
                    print("cmap is used", kwargs["cmap"])
                    drawed_colors.append(func_name + "--" + kwargs["cmap"])
                else:
                    color = convert_color_to_hex(list(result.get_facecolor()[0]))
                    drawed_colors.append(func_name + "_polar" + "--" + color)
            elif func.__name__ == "plot" and "squarify" in func_name:
                # get ax
                ax = result
                # get container
                containers = ax.containers
                for container in containers:
                    for item in container:
                        color = convert_color_to_hex(list(item.get_facecolor()))
                        drawed_colors.append(func_name + "_squarify" + "--" + color)
            elif func.__name__ == "imshow":
                colormap = result.get_cmap().name
                drawed_colors.append(func_name + "--" + colormap)
            elif func.__name__ == "pcolor":
                colormap = result.get_cmap().name
                drawed_colors.append(func_name + "--" + colormap)
            elif func.__name__ == "contour":
                colormap = result.get_cmap().name
                drawed_colors.append(func_name + "--" + colormap)
            elif func.__name__ == "contourf":
                colormap = result.get_cmap().name
                drawed_colors.append(func_name + "--" + colormap)
            elif func.__name__ == "fill":
                patches = result
                for patch in patches:
                    color = convert_color_to_hex(list(patch.get_facecolor()))
                    drawed_colors.append(func_name + "--" + color)
            elif func.__name__ == "__init__" and isinstance(args[0], NonUniformImage):
                colormap = args[0].get_cmap().name
                drawed_colors.append(func_name + "--" + colormap)
            elif func.__name__ == "broken_barh":
                colors = result.get_facecolors().tolist()
                for color in colors:
                    drawed_colors.append(func_name + "--" + convert_color_to_hex(color))
            elif func.__name__ == "__init__" and isinstance(args[0], Ellipse):
                color = convert_color_to_hex(args[0].get_facecolor())
                drawed_colors.append(func_name + "--" + color)

            in_decorator = False
        else:
            return func(*args, **kwargs)
        return result
    wrapper.__name__ = func.__name__
    return wrapper


_process_plot_var_args._makeline = log_function(_process_plot_var_args._makeline)
Axes.bar = log_function(Axes.bar)
Axes.scatter = log_function(Axes.scatter)
Axes.axhline = log_function(Axes.axhline)
Axes.axvline = log_function(Axes.axvline)
Axes._fill_between_x_or_y = log_function(Axes._fill_between_x_or_y)
Axes.pie = log_function(Axes.pie)
Axes.axvspan = log_function(Axes.axvspan)
Axes.axhspan = log_function(Axes.axhspan)
Axes.hlines = log_function(Axes.hlines)
Axes.vlines = log_function(Axes.vlines)
Axes.boxplot = log_function(Axes.boxplot)
Axes.violinplot = log_function(Axes.violinplot)
Axes.hist = log_function(Axes.hist)
Axes.plot = log_function(Axes.plot)
Axes.quiver = log_function(Axes.quiver)
Axes.imshow = log_function(Axes.imshow)
Axes.pcolor = log_function(Axes.pcolor)
Axes.contour = log_function(Axes.contour)
Axes.contourf = log_function(Axes.contourf)
Axes.fill = log_function(Axes.fill)
NonUniformImage.__init__ = log_function(NonUniformImage.__init__)
Ellipse.__init__ = log_function(Ellipse.__init__)
Axes.broken_barh = log_function(Axes.broken_barh)

nx_pylab.draw_networkx_nodes = log_function_specific_for_draw_networkx_nodes(nx_pylab.draw_networkx_nodes)
nx_pylab.draw_networkx_edges = log_function_specific_for_draw_networkx_edges(nx_pylab.draw_networkx_edges)
nx_pylab.draw_networkx_labels = log_function_specific_for_draw_networkx_labels(nx_pylab.draw_networkx_labels)


squarify.plot = log_function(squarify.plot)

Axes3D.scatter = log_function_for_3d(Axes3D.scatter)
Axes3D.plot = log_function_for_3d(Axes3D.plot)
Axes3D.plot_surface = log_function_for_3d(Axes3D.plot_surface)
Axes3D.bar3d = log_function_for_3d(Axes3D.bar3d)
Axes3D.bar = log_function_for_3d(Axes3D.bar)
Axes3D.add_collection3d = log_function_for_3d(Axes3D.add_collection3d)

# barh test
# draw a simple barh plot
# fig, ax = plt.subplots()
# ax.barh(np.arange(5), np.random.rand(5))
# ax.barh(np.arange(5), np.random.rand(5))
# plt.show()

# axhline test
# fig, ax = plt.subplots()
# ax.axhline(0.5)
# ax.axhline(0.8)
# plt.show()

# axvline test
# fig, ax = plt.subplots()
# ax.axvline(0.5)
# ax.axvline(0.8)
# plt.show()

# errorbar test
# fig, ax = plt.subplots()
# x = np.arange(10)
# y = np.sin(x)
#
# ax.errorbar(x, y, yerr=0.1)
# ax.errorbar(x, y, yerr=0.2)
# plt.show()

# squarify test
# fig, ax = plt.subplots()
# sizes = [50, 25, 25]
# squarify.plot(sizes=sizes, ax=ax)
# plt.savefig("tmp.png")
# plt.show()

# loglog test
# fig, ax = plt.subplots()
# x = np.linspace(0, 10, 100)
# y = x**2
# ax.loglog(x, y)
# plt.show()

# fill_between test
# fig, ax = plt.subplots()
# x = np.arange(10)
# y1 = np.sin(x)
# y2 = np.cos(x)
# ax.fill_between(x, y1, y2, cmap='viridis')
# plt.show()

# fill_betweenx test
# fig, ax = plt.subplots()
# x = np.arange(10)
# y1 = np.sin(x)
# y2 = np.cos(x)
# ax.fill_betweenx(x, y1, y2, cmap='viridis')
# plt.show()

# pie test
# fig, ax = plt.subplots()
# sizes = [50, 25, 25]
# ax.pie(sizes)
# plt.savefig("tmp.png")
# plt.show()

# axvspan test
# fig, ax = plt.subplots()
# ax.axvspan(0.2, 0.3, color='red', alpha=0.5)
# ax.axvspan(0.5, 0.7, color='blue', alpha=0.5)
# plt.show()

# axhspan test
# fig, ax = plt.subplots()
# ax.axhspan(0.2, 0.3, color='red', alpha=0.5)
# ax.axhspan(0.5, 0.7, color='blue', alpha=0.5)
# plt.show()


# hlines test
# fig, ax = plt.subplots()
# y_values = [1, 2, 3, 4, 5]
# xmin = 0
# xmax = 10
# ax.hlines(y=y_values, xmin=xmin, xmax=xmax, linestyles='dashed')
# ax.set_xlabel('X-axis')
# ax.set_ylabel('Y-axis')
# plt.savefig("tmp.png")
# plt.show()

# vlines test
# fig, ax = plt.subplots()
# x_values = [1, 2, 3, 4, 5]
# ymin = 0
# ymax = 10
# ax.vlines(x=x_values, ymin=ymin, ymax=ymax, linestyles='dashed')
# ax.set_xlabel('X-axis')
# ax.set_ylabel('Y-axis')
# plt.savefig("tmp.png")
# plt.show()

# boxplot test
# fig, ax = plt.subplots()
# data = np.random.rand(10, 3)
# ax.boxplot(data, patch_artist=True)
# plt.savefig("tmp.png")
# plt.show()

# violin test
# fig, ax = plt.subplots()
# data = np.random.rand(10, 3)
# ax.violinplot(data)
# plt.savefig("tmp.png")
# plt.show()

# hist test
# fig, ax = plt.subplots()
# data = np.random.rand(100, 1)
# ax.hist(data, bins=10)
# plt.savefig("tmp.png")
# plt.show()


# networkx test
# fig, ax = plt.subplots()
# G = networkx.complete_graph(5)
# draw the graph, give each node a different color, and a label. make the edges red and blue, with labels
# networkx.draw(G, ax=ax, node_color='r', edge_color='b', labels={0: '0', 1: '1', 2: '2', 3: '3', 4: '4'})
# plt.savefig("tmp.png")
# plt.show()

# quiver test
# fig, ax = plt.subplots()
# x = np.linspace(0, 10, 10)
# y = np.linspace(0, 10, 10)
# u = np.zeros(10)
# v = np.ones(10)
# # draw the quiver plot, with color red
# ax.quiver(x, y, u, v, color='r')
# plt.savefig("tmp.png")
# plt.show()

# 3d scatter test
# fig, ax = plt.subplots(subplot_kw={'projection': '3d'})
# x = np.random.rand(10)
# y = np.random.rand(10)
# z = np.random.rand(10)
# draw the scatter plot, with color red
# ax.scatter3D(x, y, z, c='#ff2395')
# plt.savefig("tmp.png")
# plt.show()

# 3d plot test
# fig, ax = plt.subplots(subplot_kw={'projection': '3d'})
# draw two lines in 3d, with color red and blue
# ax.plot([0, 1], [0, 1], [0, 1], color='r')
# ax.plot([0, 1], [0, 1], [1, 0], color='b')
| 681 |
+
|
| 682 |
+
# 3d plot_surface test
|
| 683 |
+
# fig, ax = plt.subplots(subplot_kw={'projection': '3d'})
|
| 684 |
+
# draw a surface plot, with a beautiful colormap
|
| 685 |
+
# X = np.linspace(-5, 5, 100)
|
| 686 |
+
# Y = np.linspace(-5, 5, 100)
|
| 687 |
+
# X, Y = np.meshgrid(X, Y)
|
| 688 |
+
# Z = np.sin(np.sqrt(X**2 + Y**2))
|
| 689 |
+
# ax.plot_surface(X, Y, Z, cmap='viridis')
|
| 690 |
+
# plt.savefig("tmp.png")
|
| 691 |
+
# plt.show()
|
| 692 |
+
|
| 693 |
+
# 3d bar test
|
| 694 |
+
# fig, ax = plt.subplots(subplot_kw={'projection': '3d'})
|
| 695 |
+
# x = np.arange(10)
|
| 696 |
+
# y = np.random.rand(10)
|
| 697 |
+
# z = np.zeros(10)
|
| 698 |
+
# dx = np.ones(10)
|
| 699 |
+
# dy = np.ones(10)
|
| 700 |
+
# dz = np.random.rand(10)
|
| 701 |
+
# # draw the 3d bar plot, with color red
|
| 702 |
+
# ax.bar3d(x, y, z, dx, dy, dz)
|
| 703 |
+
# plt.savefig("tmp.png")
|
| 704 |
+
# plt.show()
|
| 705 |
+
|
| 706 |
+
# # bar2d in axes3d test
|
| 707 |
+
# fig, ax = plt.subplots(subplot_kw={'projection': '3d'})
|
| 708 |
+
# x = np.arange(10)
|
| 709 |
+
# y = np.random.rand(10)
|
| 710 |
+
# z = np.zeros(10)
|
| 711 |
+
# dx = np.ones(10)
|
| 712 |
+
# dy = np.ones(10)
|
| 713 |
+
# dz = np.random.rand(10)
|
| 714 |
+
# # draw the 2d bar plot, with color red
|
| 715 |
+
# ax.bar(x, y, z, zdir='y', color=['r', 'b', 'g', 'y', 'm', 'c', 'k', 'w', 'r', 'b'])
|
| 716 |
+
# plt.savefig("tmp.png")
|
| 717 |
+
# plt.show()
|
| 718 |
+
|
| 719 |
+
|
| 720 |
+
# plot in test
|
| 721 |
+
# fig, ax = plt.subplots()
|
| 722 |
+
# x = np.linspace(0, 10, 100)
|
| 723 |
+
# y = np.sin(x)
|
| 724 |
+
# draw the plot, with color red
|
| 725 |
+
# ax.plot(x, y, color='r')
|
| 726 |
+
# plt.savefig("tmp.png")
|
| 727 |
+
# plt.show()
|
| 728 |
+
|
| 729 |
+
# matshow in test
|
| 730 |
+
# fig, ax = plt.subplots()
|
| 731 |
+
# data = np.random.rand(10, 10)
|
| 732 |
+
# draw the matshow plot, with a beautiful colormap
|
| 733 |
+
# ax.imshow(data, cmap='pink')
|
| 734 |
+
# plt.savefig("tmp.png")
|
| 735 |
+
# plt.show()
|
| 736 |
+
|
| 737 |
+
# pcolor in test
|
| 738 |
+
# fig, ax = plt.subplots()
|
| 739 |
+
# data = np.random.rand(10, 10)
|
| 740 |
+
# draw the pcolor plot, with a beautiful colormap
|
| 741 |
+
# ax.pcolor(data)
|
| 742 |
+
# plt.savefig("tmp.png")
|
| 743 |
+
# plt.show()
|
| 744 |
+
|
| 745 |
+
# # contour in test
|
| 746 |
+
# fig, ax = plt.subplots()
|
| 747 |
+
# x = np.linspace(0, 10, 100)
|
| 748 |
+
# y = np.linspace(0, 10, 100)
|
| 749 |
+
# X, Y = np.meshgrid(x, y)
|
| 750 |
+
# Z = np.sin(X) * np.cos(Y)
|
| 751 |
+
# # draw the contour plot, with a beautiful colormap
|
| 752 |
+
# ax.contour(X, Y, Z)
|
| 753 |
+
# plt.savefig("tmp.png")
|
| 754 |
+
# plt.show()
|
| 755 |
+
|
| 756 |
+
# # contourf in test
|
| 757 |
+
# fig, ax = plt.subplots()
|
| 758 |
+
# x = np.linspace(0, 10, 100)
|
| 759 |
+
# y = np.linspace(0, 10, 100)
|
| 760 |
+
# X, Y = np.meshgrid(x, y)
|
| 761 |
+
# Z = np.sin(X) * np.cos(Y)
|
| 762 |
+
# # draw the contourf plot, with a beautiful colormap
|
| 763 |
+
# ax.contourf(X, Y, Z, cmap='viridis')
|
| 764 |
+
# plt.savefig("tmp.png")
|
| 765 |
+
# plt.show()
|
| 766 |
+
|
| 767 |
+
# stackplot in test
|
| 768 |
+
# fig, ax = plt.subplots()
|
| 769 |
+
# x = np.linspace(0, 10, 100)
|
| 770 |
+
# y1 = np.sin(x)
|
| 771 |
+
# y2 = np.cos(x)
|
| 772 |
+
# y3 = np.tan(x)
|
| 773 |
+
# draw the stackplot, with beautiful colors
|
| 774 |
+
# ax.stackplot(x, y1, y2, y3, colors=['r', 'g', 'b'])
|
| 775 |
+
# plt.savefig("tmp.png")
|
| 776 |
+
# plt.show()
|
| 777 |
+
|
| 778 |
+
# fill in test
|
| 779 |
+
# fig, ax = plt.subplots()
|
| 780 |
+
# x = np.linspace(0, 10, 100)
|
| 781 |
+
# y1 = np.sin(x)
|
| 782 |
+
# y2 = np.cos(x)
|
| 783 |
+
# draw the fill plot, with color red
|
| 784 |
+
# ax.fill(x, y1, color='r')
|
| 785 |
+
# plt.savefig("tmp.png")
|
| 786 |
+
# plt.show()
|
| 787 |
+
|
| 788 |
+
|
| 789 |
+
# # NonUniformImage in test
|
| 790 |
+
# fig, ax = plt.subplots()
|
| 791 |
+
# data = np.random.rand(10, 10)
|
| 792 |
+
# x = np.linspace(-4, 4, 9)
|
| 793 |
+
# y = np.linspace(-4, 4, 9)
|
| 794 |
+
# z = np.sqrt(x[np.newaxis, :] ** 2 + y[:, np.newaxis] ** 2)
|
| 795 |
+
# im = NonUniformImage(ax, interpolation='bilinear')
|
| 796 |
+
# im.set_data(x, y , z)
|
| 797 |
+
# ax.add_image(im)
|
| 798 |
+
# plt.savefig("tmp.png")
|
| 799 |
+
# plt.show()
|
| 800 |
+
|
| 801 |
+
# broken_barh in test
|
| 802 |
+
# fig, ax = plt.subplots()
|
| 803 |
+
# x = np.linspace(0, 10, 100)
|
| 804 |
+
# y = np.sin(x)
|
| 805 |
+
# draw the broken_barh plot, with color red
|
| 806 |
+
# ax.broken_barh([(1, 2), (3, 4)], (0, 1), facecolors='r')
|
| 807 |
+
# plt.savefig("tmp.png")
|
| 808 |
+
# plt.show()
|
| 809 |
+
|
| 810 |
+
|
| 811 |
+
# Ellipse in test
|
| 812 |
+
# fig, ax = plt.subplots()
# e = matplotlib.patches.Ellipse((0.5, 0.5), 0.4, 0.2, color='r')
# ax.add_patch(e)
# plt.savefig("tmp.png")
# plt.show()

# # radar plot in test
# fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
# theta = np.linspace(0, 2*np.pi, 100)
# r = np.sin(3*theta)**2
# # draw the radar plot, with color red
# ax.plot(theta, r, color='r')
# plt.savefig("tmp.png")
# plt.show()


# import numpy as np; np.random.seed(0)

# import matplotlib.pyplot as plt
# from matplotlib.lines import Line2D

# # ===================
# # Part 2: Data Preparation
# # ===================
# # Data for PC1 and PC2
# values_pc1 = [0.8, 0.7, 0.6, 0.85, 0.9, 0.75, 0.7, 0.65, 0.8, 0.9]
# values_pc2 = [0.6, 0.55, 0.5, 0.45, 0.4, 0.35, 0.3, 0.25, 0.2, 0.15]
# num_vars = len(values_pc1)

# # Compute angle for each axis
# angles = np.linspace(0, 2 * np.pi, num_vars, endpoint=False).tolist()

# # The plot is circular, so we need to "complete the loop" and append the start to the end.
# values_pc1 += values_pc1[:1]
# values_pc2 += values_pc2[:1]
# angles += angles[:1]

# # ===================
# # Part 3: Plot Configuration and Rendering
# # ===================
# # Draw the radar chart
# fig, ax = plt.subplots(figsize=(8, 8), subplot_kw=dict(polar=True))
# ax.fill(angles, values_pc1, color="black", alpha=0.1)
# ax.plot(angles, values_pc1, color="black", linewidth=2, label="Loadings PC1")
# ax.scatter(angles[:-1], values_pc1[:-1], color="black", s=50)
# ax.fill(angles, values_pc2, color="red", alpha=0.1)
# ax.plot(angles, values_pc2, color="red", linewidth=2, label="Loadings PC2")
# ax.scatter(angles[:-1], values_pc2[:-1], color="red", s=50)

# # Add labels to the plot
# ax.set_yticklabels([])
# grid_angles = np.linspace(0, 2 * np.pi, 8, endpoint=False)
# ax.set_xticks(grid_angles)
# angle_labels = [f"{i*45}°" for i in range(8)]
# ax.set_xticklabels(angle_labels)

# # Add grid lines and labels for the concentric circles
# ax.set_rgrids(
#     [0.2, 0.4, 0.6, 0.8, 1.0],
#     labels=["0.2", "0.4", "0.6", "0.8", "1.0"],
#     angle=30,
#     color="black",
#     size=10,
# )

# # Create legend handles manually
# legend_elements = [
#     Line2D(
#         [0],
#         [0],
#         color="black",
#         linewidth=2,
#         marker="o",
#         markersize=8,
#         label="Loadings PC1",
#     ),
#     Line2D(
#         [0],
#         [0],
#         color="red",
#         linewidth=2,
#         marker="o",
#         markersize=8,
#         label="Loadings PC2",
#     ),
# ]

# # Add legend and title
# ax.legend(
#     handles=legend_elements, loc="upper right", bbox_to_anchor=(1.1, 1.1), frameon=False
# )

# # ===================
# # Part 4: Saving Output
# # ===================
# # Adjust layout and save the plot
# plt.tight_layout()
# plt.savefig('tmp.png')


# poly3d in test
# import math
# import matplotlib.pyplot as plt
# import numpy as np; np.random.seed(0)

# from matplotlib.collections import PolyCollection

# # ===================
# # Part 2: Data Preparation
# # ===================
# # Fixing random state for reproducibility
# def polygon_under_graph(x, y):
#     """
#     Construct the vertex list which defines the polygon filling the space under
#     the (x, y) line graph. This assumes x is in ascending order.
#     """
#     return [(x[0], 0.0), *zip(x, y), (x[-1], 0.0)]


# x = np.linspace(0.0, 10.0, 31)
# vaccination_numbers = range(1, 4)

# # verts[i] is a list of (x, y) pairs defining polygon i.
# gamma = np.vectorize(math.gamma)
# verts = [
#     polygon_under_graph(x, v**x * np.exp(-v) / gamma(x + 1))
#     for v in vaccination_numbers
# ]

# # ===================
# # Part 3: Plot Configuration and Rendering
# # ===================
# ax = plt.figure(figsize=(8, 6)).add_subplot(projection="3d")
# facecolors = plt.colormaps["viridis_r"](np.linspace(0, 1, len(verts)))

# poly = PolyCollection(verts, facecolors=facecolors, alpha=0.7)
# ax.add_collection3d(poly, zs=vaccination_numbers, zdir="y")

# ax.set(
#     xlim=(0, 10),
#     ylim=(1, 4),
#     zlim=(0, 0.35),
#     xlabel="Age",
#     ylabel="Vaccination Number",
#     zlabel="Incidence Rate",
# )

# ax.set_yticks([1, 2, 3])
# ax.set_box_aspect(aspect=None, zoom=0.8)

# # ===================
# # Part 4: Saving Output
# # ===================
# plt.tight_layout()
# plt.savefig('3d_14.pdf', bbox_inches='tight')


drawed_colors = set(drawed_colors)
print("drawed_colors", drawed_colors)

VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/chart_type_evaluator.py
ADDED
@@ -0,0 +1,179 @@
# flake8: noqa
from typing import Dict
# from dotenv import load_dotenv
# load_dotenv()

import os
from ..eval_configs.global_config import run_script_safe


class ChartTypeEvaluator:

    def __init__(self):
        self.metrics = {
            "precision": 0,
            "recall": 0,
            "f1": 0
        }

    def __call__(self, generation_code_file, golden_code_file):
        generation_chart_types = self._get_chart_types(generation_code_file)
        golden_chart_types = self._get_chart_types(golden_code_file)

        self.golden_code_file = golden_code_file

        self._calculate_metrics(generation_chart_types, golden_chart_types)

        # [TAG] What is this for?
        # redunant_file = os.environ["VLMEVAL_CHARTMIMIC_UTILS_PATH"] + "/" + os.path.basename(golden_code_file).replace(".py", ".pdf")
        # print(f"redunant_file: {redunant_file}")
        # breakpoint()
        # # if os.path.exists(redunant_file) == True:
        # os.remove(redunant_file)

        # print(self.metrics)

    def _get_chart_types(self, code_file):

        with open(code_file, "r") as f:
            lines = f.readlines()
            code = "".join(lines)

        prefix = self._get_prefix()
        output_file = code_file.replace(".py", "_log_chart_types.txt")
        suffix = self._get_suffix(output_file)
        code = prefix + code + suffix

        code_log_chart_types_file = code_file.replace(
            ".py", "_log_chart_types.py")
        with open(code_log_chart_types_file, "w") as f:
            f.write(code)

        # os.system(f"python {code_log_chart_types_file}")
        success = run_script_safe(code_log_chart_types_file)
        if not success:
            print("Skip downstream logic due to previous failure.")
            # optionally return default result or continue

        if os.path.exists(output_file):
            with open(output_file, "r") as f:
                chart_types = f.read()
            chart_types = eval(chart_types)
            os.remove(output_file)
        else:
            chart_types = {}
        os.remove(code_log_chart_types_file)

        # pdf_file = re.findall(r"plt\.savefig\('(.*)'\)", code)
        # if len(pdf_file) != 0:
        #     pdf_file = pdf_file[0].split(",")[0][:-1]
        #     print(pdf_file)
        #     if os.path.basename(pdf_file) == pdf_file:
        #         os.remove(pdf_file)

        return chart_types

    def _calculate_metrics(
            self, generation_chart_types: Dict[str, int], golden_chart_types: Dict[str, int]):
        """
        Calculate precision, recall, and f1 score of the chart types.

        Args:
            - generation_chart_types: Dict[str, int]
                - key: chart type
                - value: number of times the chart type is called
            - golden_chart_types: Dict[str, int]
                - key: chart type
                - value: number of times the chart type is called
        """
        if len(generation_chart_types) == 0:
            return

        n_correct = 0
        total = sum(generation_chart_types.values())

        for chart_type, count in generation_chart_types.items():
            if chart_type in golden_chart_types:
                n_correct += min(count, golden_chart_types[chart_type])

        self.metrics["precision"] = n_correct / total
        try:
            self.metrics["recall"] = n_correct / \
                sum(golden_chart_types.values())
        except BaseException:
            print(
                "<<<<<<<<<<<<<<<<<<<<golden_code_file",
                self.golden_code_file)
        if self.metrics["precision"] + self.metrics["recall"] == 0:
            self.metrics["f1"] = 0
        else:
            self.metrics["f1"] = 2 * self.metrics["precision"] * \
                self.metrics["recall"] / (self.metrics["precision"] + self.metrics["recall"])
        return

    def _get_prefix(self):
        with open(os.environ["VLMEVAL_CHARTMIMIC_UTILS_PATH"] + "/evaluator/chart_type_evaluator_prefix.py", "r") as f:
            prefix = f.read()
        return prefix

    # def _get_prefix(self):
    #     return f"""
    # import warnings
    # warnings.filterwarnings("ignore", category=UserWarning)

    # import sys
    # sys.path.insert(0, '{os.environ['PROJECT_PATH']}')
    # import matplotlib.pyplot as plt
    # from matplotlib.axes import Axes
    # import squarify
    # import inspect

    # called_functions = {{}}
    # in_decorator = False

    # def log_function(func):
    #     def wrapper(*args, **kwargs):
    #         global in_decorator
    #         if not in_decorator:
    #             in_decorator = True
    #             # name = func.__name__
    #             file_name = inspect.getfile(func)
    #             name = file_name + "-" + func.__name__
    #             called_functions[name] = called_functions.get(name, 0) + 1
    #             result = func(*args, **kwargs)
    #             in_decorator = False
    #             return result
    #         else:
    #             return func(*args, **kwargs)
    #     return wrapper

    # Axes.plot = log_function(Axes.plot)
    # Axes.loglog = log_function(Axes.loglog)
    # Axes.scatter = log_function(Axes.scatter)
    # Axes.bar = log_function(Axes.bar)
    # Axes.barh = log_function(Axes.barh)
    # Axes.axhline = log_function(Axes.axhline)
    # Axes.axvline = log_function(Axes.axvline)
    # Axes.errorbar = log_function(Axes.errorbar)
    # Axes.matshow = log_function(Axes.matshow)
    # Axes.hist = log_function(Axes.hist)
    # Axes.pie = log_function(Axes.pie)
    # Axes.boxplot = log_function(Axes.boxplot)
    # Axes.arrow = log_function(Axes.arrow)
    # Axes.fill_between = log_function(Axes.fill_between)
    # Axes.fill_betweenx = log_function(Axes.fill_betweenx)
    # Axes.imshow = log_function(Axes.imshow)
    # Axes.contour = log_function(Axes.contour)
    # Axes.contourf = log_function(Axes.contourf)
    # Axes.violinplot = log_function(Axes.violinplot)
    # Axes.violin = log_function(Axes.violin)

    # squarify.plot = log_function(squarify.plot)
    # """

    def _get_suffix(self, output_file):
        return f"""
# print(called_functions)
with open('{output_file}', 'w') as f:
    f.write(str(called_functions))
"""
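
For reference, a worked example of the multiset precision/recall/F1 computed by _calculate_metrics above; a sketch with invented chart-type counts, not values from the benchmark:

# Sketch: the multiset precision/recall/F1 used by ChartTypeEvaluator.
# The two dicts are invented inputs (chart type -> number of calls).
gen = {"plot": 2, "bar": 1}
gold = {"plot": 1, "bar": 1}
n_correct = sum(min(c, gold.get(t, 0)) for t, c in gen.items())  # min(2,1) + min(1,1) = 2
precision = n_correct / sum(gen.values())           # 2 / 3
recall = n_correct / sum(gold.values())             # 2 / 2 = 1.0
f1 = 2 * precision * recall / (precision + recall)  # 0.8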

VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/mp_util.py
ADDED
@@ -0,0 +1,76 @@
from concurrent.futures import ProcessPoolExecutor, as_completed
from tqdm import tqdm
import traceback
import os
from typing import Callable, Iterable
from ....smp import *

logger = get_logger("ChartMimic/mp_util")


def track_progress_rich_new(
    func: Callable,
    tasks: Iterable = tuple(),
    nproc: int = 1,
    save=None,
    keys=None,
    **kwargs
) -> list:
    """
    Parallel execution with progress tracking and safe interim saving.
    """
    # Prepare persistent storage
    if save:
        os.makedirs(os.path.dirname(save), exist_ok=True)
        if not os.path.exists(save):
            dump({}, save)
        res = load(save)
    else:
        res = {}

    results = [None] * len(tasks)
    future_to_idx = {}

    # Use process pool to bypass GIL for CPU-bound tasks
    with ProcessPoolExecutor(max_workers=nproc) as executor:
        for idx, inp in enumerate(tasks):
            # Support dict, tuple/list, or single-value tasks
            if isinstance(inp, dict):
                future = executor.submit(func, **inp)
            elif isinstance(inp, (list, tuple)):
                future = executor.submit(func, *inp)
            else:
                future = executor.submit(func, inp)
            future_to_idx[future] = idx

        # Display progress bar as tasks complete
        with tqdm(total=len(tasks)) as pbar:
            for future in as_completed(future_to_idx):
                idx = future_to_idx[future]
                key = keys[idx] if keys else None
                try:
                    result = future.result()
                except Exception as e:
                    exc_type = type(e).__name__
                    err_msg = f"[{exc_type}] Exception in task {key or idx}: {str(e)}"
                    logger.error(err_msg)
                    logger.error("Full traceback:")
                    logger.error(traceback.format_exc())

                    # Optional: attach traceback to result for downstream
                    # reference
                    result = getattr(e, 'result', (-1, {
                        'msg': err_msg,
                        'traceback': traceback.format_exc(),
                    }))

                results[idx] = result
                # Update persistent results
                if keys and key is not None:
                    res[key] = result
                    if save:
                        dump(res, save)  # save after each task

                pbar.update(1)

    return results
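
A minimal usage sketch for track_progress_rich_new; the worker function and save path below are hypothetical, and the worker must be a module-level (picklable) function for ProcessPoolExecutor to submit it:

# Hypothetical worker: defined at module level so it can be pickled.
def square(x):
    return x * x

results = track_progress_rich_new(
    square,
    tasks=[1, 2, 3, 4],
    nproc=2,
    save='tmp/scores.pkl',       # interim results are dumped here after each task
    keys=['a', 'b', 'c', 'd'],   # one key per task for the saved dict
)
# results preserves submission order: [1, 4, 9, 16]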

VLMEvalKit-sudoku/vlmeval/dataset/utils/hrbench.py
ADDED
@@ -0,0 +1,54 @@
from ...smp import *
import os


def report_acc_hrbench(df):
    cycle_group = df.groupby('cycle_category')
    result_dic = defaultdict(list)
    avg_dic = defaultdict(int)

    count = 0
    for key, data_value in cycle_group:
        count += 1
        _, resp_dic = hrbench_score(data_value)

        for task_type, accuracy in resp_dic.items():
            result_dic['cycle'].append(key)
            result_dic['type'].append(task_type)
            result_dic['accuracy'].append(accuracy)

            avg_dic[task_type] += accuracy
    for task_type, accuracy in avg_dic.items():
        result_dic['cycle'].append('Average')
        result_dic['type'].append(task_type)
        result_dic['accuracy'].append(accuracy / count)
    result_pd = pd.DataFrame(result_dic)

    return result_pd


def hrbench_score(data):
    ret = defaultdict(list)
    resp_dic = {}
    category_list = set(data['category'])
    score_dict = defaultdict(list)

    for i in range(len(data)):
        d = data.iloc[i]
        category = d['category']
        gpt_score = d['hit']
        score_dict[category].append(gpt_score)
        score_dict['all'].append(gpt_score)

    all_acc = np.mean(score_dict['all'])
    ret['type'].append('all')
    ret['acc'].append(all_acc)
    resp_dic['all'] = all_acc
    for cate in category_list:
        acc = np.mean(score_dict[cate])
        ret['type'].append(cate)
        ret['acc'].append(acc)

        resp_dic[cate] = acc

    return pd.DataFrame(ret), resp_dic
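
A small sketch of what hrbench_score consumes and returns; the column names follow the function above, while the rows themselves are invented:

import pandas as pd

toy = pd.DataFrame({
    'category': ['single', 'single', 'cross'],
    'hit': [1, 0, 1],   # 1 = judged correct, 0 = judged wrong
})
per_type, resp = hrbench_score(toy)
# resp == {'all': 0.666..., 'single': 0.5, 'cross': 1.0}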

VLMEvalKit-sudoku/vlmeval/dataset/utils/logicvista.py
ADDED
@@ -0,0 +1,150 @@
import pandas as pd

# from colorama import Fore, Back, Style
from ...smp import *


FAIL_MSG = 'Failed to obtain answer via API.'


def build_prompt_logicvista(line):
    question = line['question']
    prediction = str(line['prediction'])
    tmpl = (
        "You are an information extractor that extracts multiple choice letter answer choices "
        "from a paragraph that contains the answer choice and sometimes an explanation of why that "
        "choice is correct to the given question.\n"
        "What letter did the following answer choose? If the answer did not select a letter answer choice, "
        "first try to infer the answer based off the given choices.\n"
        "If it does not seem like the given answer corresponds to an answer choice OR if there is no selected answer, please just respond with Z.\n"  # noqa: E501
        "Make sure you answer with ONLY the letters chosen.\n"
        'Example 1: \n'
        'Question: <start>\nWhat is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\n<end>\n'
        'Answer: <start>\na cute teddy bear\n<end>\nYour output: A\n'
        'Example 2: \n'
        'Question: <start>\nWhat is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\n<end>\n'
        'Answer: <start>\nSpider\n<end>\nYour output: Z\n'
        'Example 3: \n'
        'Question: <start>\nWhich figure is a rotation of the object?\n<end>\n'
        'Answer: <start>\nThe figure on the right, labeled "D," is a rotation of the object shown in the top left corner.\n<end>\nYour output: D\n'  # noqa: E501
        'Example 4: \n'
        'Question: <start>\nWhich of the boxes comes next in the sequence? Select from A-E\n<end>\n'
        'Answer: <start>\nThe sequence of the boxes is A, B, C, D, E.\n<end>\nYour output: ABCDE\n'
        'Example 5: \n'
        'Question: <start>\n{}\n<end>\nAnswer: <start>\n{}\n<end>\nYour output: '
    )

    return tmpl.format(question, prediction)


def LogicVista_auxeval(model, line):
    prompt = build_prompt_logicvista(line)
    print(prompt)
    log = ''
    retry = 5

    for i in range(retry):
        prediction = line['prediction']
        res = model.generate(prompt, temperature=i * 0.5)
        answer = line['answer'].split(", ")
        for j in range(0, len(answer)):
            answer[j] = answer[j].lower()
        answer.sort()
        answer = ''.join(answer)

        if FAIL_MSG in res:
            log += f'Try {i}: output is {prediction}, failed to parse.\n'
        elif not res.isupper() or not res.isalpha():
            log += f'Try {i}: output is {prediction}, failed to parse.\n'
        else:
            log += 'Succeed'
            hit = 0
            extracted = [alpha.lower() for alpha in res]
            extracted.sort()
            extracted = ''.join(extracted)
            if extracted == answer:
                hit = 1
            return dict(log=log, res=res, hit=hit)
    log += 'All 5 retries failed.\n'
    return dict(log=log, res='', hit=0)


cat = ["diagram", "ocr", "patterns", "graphs", "tables", "3d shapes", "puzzles", "sequences", "physics"]


def evaluate_logicvista(file_path):
    df = load(file_path)

    tot = defaultdict(lambda: 0)
    hit = defaultdict(lambda: 0)
    acc = defaultdict(lambda: 0)

    df_tot = df

    df_inductive = df[df["skill"].str.contains("inductive")]
    df_deductive = df[df["skill"].str.contains("deductive")]
    df_numerical = df[df["skill"].str.contains("numerical")]
    df_spatial = df[df["skill"].str.contains("spatial")]
    df_mechanical = df[df["skill"].str.contains("mechanical")]

    tot_correct = df_tot["hit"].sum()
    tot_acc = (tot_correct / df_tot.shape[0]) * 100
    tot['Overall'] = df_tot.shape[0]
    hit['Overall'] = tot_correct
    acc['Overall'] = tot_acc

    inductive_correct = df_inductive["hit"].sum()
    inductive_acc = (inductive_correct / df_inductive.shape[0]) * 100

    tot["inductive"] = df_inductive.shape[0]
    hit["inductive"] = inductive_correct
    acc["inductive"] = inductive_acc

    deductive_correct = df_deductive["hit"].sum()
    deductive_acc = (deductive_correct / df_deductive.shape[0]) * 100

    tot["deductive"] = df_deductive.shape[0]
    hit["deductive"] = deductive_correct
    acc["deductive"] = deductive_acc

    numerical_correct = df_numerical["hit"].sum()
    numerical_acc = (numerical_correct / df_numerical.shape[0]) * 100

    tot["numerical"] = df_numerical.shape[0]
    hit["numerical"] = numerical_correct
    acc["numerical"] = numerical_acc

    spatial_correct = df_spatial["hit"].sum()
    spatial_acc = (spatial_correct / df_spatial.shape[0]) * 100

    tot["spatial"] = df_spatial.shape[0]
    hit["spatial"] = spatial_correct
    acc["spatial"] = spatial_acc

    mechanical_correct = df_mechanical["hit"].sum()
    mechanical_acc = (mechanical_correct / df_mechanical.shape[0]) * 100

    tot["mechanical"] = df_mechanical.shape[0]
    hit["mechanical"] = mechanical_correct
    acc["mechanical"] = mechanical_acc

    # capability dimension, the official data json does not contain 'capability' column, so it is now ignored
    # for i in cat:
    #     curr = df[df["capability"].str.contains(i.replace(" ", ""))]
    #     correct = curr["hit"].sum()
    #     accuracy = (correct / curr.shape[0]) * 100
    #     tot[i] = curr.shape[0]
    #     hit[i] = correct
    #     acc[i] = accuracy

    res = defaultdict(list)
    for k in tot.keys():
        res['Task&Skill'].append(k)
        res['tot'].append(tot[k])
        res['hit'].append(hit[k])
        res['acc'].append(acc[k])
    res = pd.DataFrame(res)
    return res
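
A small illustration of the answer normalization inside LogicVista_auxeval above: the gold answer and the extracted letters are both lower-cased, sorted, and joined, so case and ordering do not matter (values invented):

answer = "B, A".split(", ")                                  # ['B', 'A']
answer = ''.join(sorted(a.lower() for a in answer))          # 'ab'

res = "AB"                                                   # extractor output
extracted = ''.join(sorted(alpha.lower() for alpha in res))  # 'ab'
hit = 1 if extracted == answer else 0                        # 1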

VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (348 Bytes)

VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/aggregation_type.cpython-310.pyc
ADDED
Binary file (917 Bytes)

VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/metric_type.cpython-310.pyc
ADDED
Binary file (8.88 kB)

VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation/mean_agg.py
ADDED
@@ -0,0 +1,22 @@
from numbers import Number
from typing import Dict
import numpy as np


class MeanAggregation:
    """Take the mean of all valid scores."""

    @staticmethod
    def aggregate(scores: Dict[str, Number], weights: Dict[str, Number]) -> Number:
        """Weighted mean over fields with a valid (non-negative) score."""
        filtered_scores = {f: s for f, s in scores.items() if s >= 0}
        if not filtered_scores:
            return -1

        # Align the key order
        flattened_scores = []
        flattened_weights = []
        for field in filtered_scores:
            flattened_scores.append(filtered_scores[field])
            flattened_weights.append(weights[field])
        return np.average(flattened_scores, weights=flattened_weights)
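
A worked example of the aggregation above: scores below 0 act as "not applicable" and are dropped, and the rest are weight-averaged (field names and numbers invented):

scores = {"f1": 1.0, "f2": 0.5, "f3": -1}   # f3 is filtered out
weights = {"f1": 2, "f2": 1, "f3": 5}
# MeanAggregation.aggregate(scores, weights)
# == (1.0 * 2 + 0.5 * 1) / (2 + 1) == 2.5 / 3 ≈ 0.833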

VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation/unsupported_agg.py
ADDED
@@ -0,0 +1,8 @@
from numbers import Number
from typing import Dict


class UnsupportedAggregation:
    @staticmethod
    def aggregate(scores: Dict[str, Number], weights: Dict[str, Number]) -> Number:
        return -1

VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/chess_jaccard.py
ADDED
@@ -0,0 +1,24 @@
import logging
from typing import Dict, Any
from .common.conversions import str_to_set
from .common.metrics import jaccard_index


def chess_transform(move_sequence: str) -> set:
    """Transform a sequence of chess moves encoded in SAN into a set."""
    move_sequence = str_to_set(move_sequence)
    return {move_san.removesuffix("!").removesuffix("#") for move_san in move_sequence}


class ChessMoveJaccard:
    """Calculates the Jaccard index for chess moves."""

    @classmethod
    def match(cls, responses: str | None, targets: str) -> float:
        """Jaccard index between the target and response move sets."""
        if responses is None:
            return 0
        responses = chess_transform(responses)
        targets = chess_transform(targets)

        return jaccard_index(responses, targets)
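
For reference, the Jaccard index this metric reduces to is |A ∩ B| / |A ∪ B| over the two move sets; a sketch with invented SAN moves (jaccard_index itself lives in .common.metrics):

responses = {"e4", "Nf3", "Bb5"}
targets = {"e4", "Nf3", "d4"}
# intersection {e4, Nf3} has 2 moves, union has 4 -> score 2 / 4 = 0.5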

VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/constrained_generation.py
ADDED
@@ -0,0 +1,518 @@
import collections
import itertools
import logging
from numbers import Number
import re
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
import pronouncing
from .common.conversions import str_to_iterable
from ..parsing.common.parsers import parse_nested_str_list, parse_syllable_ranges
import signal


def custom_lemmatize(word, lemmatizer):
    """
    Custom lemmatization to handle special cases like 'puppies' -> 'puppy'.
    """
    lemma = lemmatizer.lemmatize(word, wordnet.NOUN)

    # Handle irregular plural forms manually
    if word.endswith("ies") and lemma.endswith("y"):
        lemma = lemma[:-1] + "y"
    elif word.endswith("ves") and lemma.endswith("f"):
        lemma = lemma[:-1] + "f"

    return lemma


def lemmatize_phrase(phrase, lemmatizer):
    """
    Lemmatizes a phrase (multiple words).
    """
    words = word_tokenize(phrase.lower())
    lemmatized_words = [custom_lemmatize(word, lemmatizer) for word in words]
    return " ".join(lemmatized_words)


custom_phones_for_word = {
    "'gainst": ["G EH1 N S T", "G EY1 N S T"],
    "'midst": ["M IH1 D S T"],
    "'mongst": ["M AH1 NG S T"],
    "'neath": ["N IY1 TH"],
    "beguiles": ["B IH0 G AY1 L Z"],
    "cerulean": ["S ER0 UW1 L IY0 AH0 N"],
    "doggo": ["D AO1 G OW0"],
    "downtorn": ["D AW1 N T AO2 R N"],
    "enthrall": ["EH0 N TH R AO1 L"],
    "fam'ly": ["F AE1 M L IY0"],
    "fiery": ["F AY1 ER0 IY0", "F AY1 R IY0"],
    "flits": ["F L IH1 T S"],
    "furred": ["F ER1 D"],
    "kneels": ["N IY1 L Z"],
    "o'er": ["OW1 ER0"],
    "orbs": ["AO1 R B Z"],
    "quenched": ["K W EH1 N CH D"],
    "quietude": ["K W AY1 AH0 T UW0 D"],
    "retold": ["R IY0 T OW1 L D"],
    "scurries": ["S K ER1 IY0 Z"],
    "sunbeams": ["S AH1 N B IY2 M Z"],
    "syncs": ["S IH1 NG K S"],
    "'twixt": ["T W IH1 K S T"],
}


file_logger = logging.getLogger("errorLogger")


def phones_for_word(text: str) -> list[str]:
    """A wrapper for pronouncingpy's phones_for_word to handle out-of-vocab issues."""
    text = text.replace("’", "'").lower()

    suffixes = [""]
    prefixes = [""]
    prefixes_to_remove = [""]
    if text.endswith("'s"):
        suffixes = [" Z"]
        text = text.removesuffix("'s")

    if text in custom_phones_for_word:
        return [
            pr + suffix
            for pr, suffix in itertools.product(custom_phones_for_word[text], suffixes)
        ]

    pronunciations = pronouncing.phones_for_word(text)

    # Guess pronunciation from word stem
    if not pronunciations:
        if suffixes[0] != "":  # "'s doesn't really work with the rest."
            file_logger.error(f"OOV: {text}")
            return []

        if text.endswith("ed"):
            suffixes = [" D", " T", " AH0 D", " IH0 D"]
            text = text.removesuffix("ed")
        elif text.endswith("s"):
            # Some words, like bustles, end with es but the plural suffix is s.
            if pronouncing.phones_for_word(text.removesuffix("s")):
                # On the other hand, pierces is two syllables but pierce is one.
                if text.endswith("es"):
                    suffixes = [" S", " Z", " AH0 Z", " IH0 Z"]
                    text = text.removesuffix("s")
                else:
                    suffixes = [" S", " Z"]
                    text = text.removesuffix("s")
            elif text.endswith("es"):
                suffixes = [" AH0 Z", " IH0 Z"]
                text = text.removesuffix("es")
        if text.startswith("un"):
            prefixes = ["AH0 N "]
            text = text.removeprefix("un")
        elif text.startswith("'"):
            if pronouncing.phones_for_word("a" + text.removeprefix("'")):
                prefixes_to_remove = ["AH0 "]
                text = "a" + text.removeprefix("'")
        pronunciations = pronouncing.phones_for_word(text)
        pronunciations = [
            (prefix + pr + suffix).removeprefix(prefix_to_remove)
            for prefix, pr, suffix, prefix_to_remove in itertools.product(
                prefixes, pronunciations, suffixes, prefixes_to_remove
            )
        ]

    if not pronunciations:
        file_logger.error(f"OOV: {text}")
    return pronunciations


def rhyming_part_include_unstressed(phones: str) -> str:
    """Get the "rhyming part" of a string with CMUdict phones.

    "Rhyming part" here means everything from the vowel in the
    last syllable up to the end of the word.

    Example:
        >>> import pronouncing
        >>> phones = pronouncing.phones_for_word("purple")
        >>> rhyming_part_include_unstressed(phones[0])
        'AH0 L'

    Args:
        phones: a string containing space-separated CMUdict phones

    Returns:
        a string with just the "rhyming part" of those phones
    """
    phones_list = phones.split()
    for i in range(len(phones_list) - 1, 0, -1):
        if phones_list[i][-1] in "012":
            phones = " ".join(phones_list[i:])
            break
    return re.sub(r"\d", "", phones)


def count_syllables(text: str) -> list[int]:
    """Count the number of syllables in a piece of text."""
    pronunciations = [phones_for_word(p) for p in text.split()]
    syllable_counts = []
    for pronun_possibility in itertools.product(*pronunciations):
        syllable_counts.append(
            sum([pronouncing.syllable_count(p) for p in pronun_possibility])
        )
    return syllable_counts


def find_string_occurrences_with_variations(text, search_string):
    lemmatizer = WordNetLemmatizer()

    # Lemmatize the entire search phrase
    search_lemma = lemmatize_phrase(search_string, lemmatizer)

    # Tokenize the text into sentences
    sentences = sent_tokenize(text)

    occurrences = []
    total_count = 0

    # Iterate over each sentence
    for i, sentence in enumerate(sentences, 1):  # Sentence numbers start from 1
        # Lemmatize the entire sentence
        lemmatized_sentence = lemmatize_phrase(sentence, lemmatizer)

        # Count occurrences of the lemmatized search phrase in the lemmatized sentence
        count_in_sentence = lemmatized_sentence.count(search_lemma)
        if count_in_sentence > 0:
            occurrences.append((i, count_in_sentence))
            total_count += count_in_sentence

    return total_count, occurrences


def word_to_stresses(word: str) -> list[list[int]]:
    """Convert a word to a list of stresses, for each valid pronunciation."""
    pronunciations = phones_for_word(word)
    stresses = {
        tuple(int(stress) for stress in pronouncing.stresses(pronunc))
        for pronunc in pronunciations
    }
    return [list(pronunc_stresses) for pronunc_stresses in stresses]


def is_iambic_pair(stress1: int, stress2: int) -> bool:
    """Whether the pair of stresses is a valid iambic pair."""
    valid_pairs = {(2, 1), (0, 2), (0, 1), (0, 0), (1, 1), (2, 2)}
    return (stress1, stress2) in valid_pairs


def grouper_ignore_last(iterable, n):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3) --> ABC DEF
    args = [iter(iterable)] * n
    return zip(*args)


def is_line_iambic(line: str) -> bool:
    """Determine if a line is iambic."""
    words = line.split()
    stress_options = [word_to_stresses(word) for word in words]

    def backtrack(word_index: int, syllable_index: int, prev_stress: int) -> bool:
        """Using backtracking, determine if there is a sequence of pronunciations that is in iambic pentameter."""
        if word_index == len(words):
            # At this point, syllable_index is the number of syllables
            return syllable_index % 2 == 0

        for stress_pattern in stress_options[word_index]:
            word_syllable_index = 0
            if syllable_index % 2 != 0:
                current_stress = stress_pattern[word_syllable_index]
                if not is_iambic_pair(prev_stress, current_stress):
                    continue
                word_syllable_index += 1

            word_valid_iambic_pairs = True
            for stress1, stress2 in grouper_ignore_last(
                stress_pattern[word_syllable_index:], 2
            ):
                if not is_iambic_pair(stress1, stress2):
                    word_valid_iambic_pairs = False
                    break
                word_syllable_index += 2
            if not word_valid_iambic_pairs:
                continue

            if word_syllable_index < len(stress_pattern):
                assert word_syllable_index + 1 == len(stress_pattern)
                next_stress = stress_pattern[word_syllable_index]
                if backtrack(
                    word_index + 1,
                    syllable_index + word_syllable_index + 1,
                    next_stress,
                ):
                    return True
            else:
                assert word_syllable_index == len(stress_pattern)
                if backtrack(word_index + 1, syllable_index + word_syllable_index, -1):
                    return True

        return False

    return backtrack(
        0, 0, -1
    )  # Start with -1 as prev_stress as a placeholder for the first syllable


def parse_constraints(key_string, value_string):
    key_components = key_string.strip().split("##")
    # Remove trailing numbers from each key
    key_components = [re.sub(r"\d+$", "", key) for key in key_components]
    # Extract value components by splitting on ##
    value_components = value_string.strip().split("##")
    # Clean value components by removing brackets and spaces
    value_components = [comp.strip().strip('"').strip() for comp in value_components]

    # Handle cases where we expect integers
    for i, value in enumerate(value_components):
        if value.isdigit():
            value_components[i] = int(value)

    # Combine keys and values into a dictionary
    if len(key_components) == len(value_components):
        result = {
            key.lower(): value for key, value in zip(key_components, value_components)
        }
    elif len(key_components) == 1 and len(value_components) == 1:
        result = {key_components[0].lower(): value_components[0]}
    else:
        raise ValueError("Mismatch between number of keys and values.")

    return result


def check_constraint(response, constraint, constraint_val):
    if constraint_val.strip() == "":
        # empty constraint (placeholder), directly return 1
        return 1
    elif "contain" in constraint:
        occurs_records = {}
        parsed_constraint = parse_constraints(constraint, constraint_val)
        response = response.replace("**", "")  # Remove markdown around bolded letters
        if "contain_only" in parsed_constraint:
            num_satisfied = 0
            conditions = parse_nested_str_list(parsed_constraint["contain_only"])
            for cond in conditions:
                count, occurs = 0, []
                for item in cond:  # check one condition
                    count_, occurs_ = find_string_occurrences_with_variations(
                        response, item
                    )
                    if count_ > 0:
                        count += count_
                        occurs.extend(occurs_)
                if count > 0:
                    num_satisfied += 1
                    occurs_records[tuple(cond)] = occurs
            score = 1 if num_satisfied == 1 else 0
        else:  # the vanilla "contain" constraint
            items = str_to_iterable(list, parsed_constraint["contain"])
            count, occurs = 0, []
            for item in items:
                count_, occurs_ = find_string_occurrences_with_variations(
                    response, item
                )
                if count_ > 0:
                    count += count_
                    occurs.extend(occurs_)
            if count > 0:
                occurs_records[tuple(items)] = occurs
            score = 0 if count == 0 else 1

        ## Other logics like position or repeat are only checked when the
        ## previous "contain" constraint passes
        if score > 0:
            occurs = list(occurs_records.values())[0]
            if "position_only" in parsed_constraint:
                pos = parsed_constraint["position_only"]
                score = 1 if len(occurs) == 1 and occurs[0][0] == pos else 0
                return score
            elif "position" in parsed_constraint:
                pos = parsed_constraint["position"]
                occurs_sent_ids = [item[0] for item in occurs]
                score = 1 if pos in occurs_sent_ids else 0

            # check occurrence times
            if "times" in parsed_constraint:
                repeat_times = parsed_constraint["times"]
                total_occurs = sum([item[1] for item in occurs])
                score = 1 if total_occurs == repeat_times else 0

    elif "length" in constraint:
        try:
            len_constraint = int(constraint_val[1:])
            words = re.findall(r"\b\w+\b", response)
            if constraint_val.strip() == "":
                score = 1  ## dummy placeholder constraint, score is 1
            elif constraint_val[0] == "<":
                score = 1 if len(words) < len_constraint else 0
            elif constraint_val[0] == ">":
                score = 1 if len(words) > len_constraint else 0
            else:
                file_logger.warning(f"Unknown length info {constraint_val}")
        except ValueError:
            file_logger.warning(f"Wrong length info {constraint_val}")
            score = 0
    elif "acrostic" in constraint:
        response = response.replace("**", "")

        lines = response.strip().lower().split("\n")
        if len(lines) != len(constraint_val):
            return 0
        all_match = True
        if "acrostic_alliteration" in constraint:
            for line, letter in zip(lines, constraint_val.lower()):
                line = line.strip()
                if letter == " ":
                    if line != "":
                        all_match = False
                        break
                elif not line or not all(word[0] == letter for word in line.split(" ")):
                    all_match = False
                    break
        else:
            for line, letter in zip(lines, constraint_val.lower()):
                line = line.strip()
                if letter == " ":
                    if line != "":
                        all_match = False
                        break
                elif not line or not line[0] == letter:
                    all_match = False
                    break
        score = 1 if all_match else 0
    else:
        response = response.strip()
        response = response.replace(".", "")
        response = response.replace(",", "")
        response = response.replace("!", "")
        response = response.replace("?", "")
        response = response.replace(":", "")
        response = response.replace(";", "")
        response = response.replace('"', "")
        response = response.replace("-", " ")
        response = response.replace("—", " ")
        response = re.sub(
            r" *\(\w\) *(?=\n|$)", "", response
        )  # The parenthesized letter in the rhyming scheme

        lines = response.lower().split("\n")
        match constraint:
            case "syllables":
                syllable_count_intervals = parse_syllable_ranges(constraint_val)
                if len(lines) != len(syllable_count_intervals):
                    return 0
                try:
                    all_match = all(
                        any(
                            min_count <= syll_count <= max_count
                            for syll_count in count_syllables(line)
                        )
                        for line, (min_count, max_count) in zip(
                            lines, syllable_count_intervals
                        )
                    )
                except IndexError:
                    all_match = None
                score = 1 if all_match else 0
            case "rhyming_scheme":
                # Ensure that the number of lines is the same as the number in the rhyming scheme
                if len(lines) != len(constraint_val):
                    return 0
                last_words = []
                for line in lines:
                    if line.strip():  # Check if line has non-whitespace content
                        words = line.split()
                        last_words.append(words[-1] if words else "")
                    else:
                        last_words.append("")

                # Map each rhyming scheme letter to the last word of a line
                letter_to_words = collections.defaultdict(set)
                for rhyme_letter, word in zip(constraint_val, last_words):
                    if rhyme_letter == " ":
                        if word != "":
                            return 0
                    else:
                        letter_to_words[rhyme_letter].add(word)

                # Check that 1. The words for the same letter all rhyme
|
| 450 |
+
letter_to_rhyming_parts = {}
|
| 451 |
+
for letter, words in letter_to_words.items():
|
| 452 |
+
rhyming_parts: list[set[str]] = [
|
| 453 |
+
{
|
| 454 |
+
rhyming_part_include_unstressed(pronunciations)
|
| 455 |
+
for pronunciations in phones_for_word(word)
|
| 456 |
+
}
|
| 457 |
+
for word in words
|
| 458 |
+
]
|
| 459 |
+
common_rhyming_parts = set.intersection(*rhyming_parts)
|
| 460 |
+
if not common_rhyming_parts:
|
| 461 |
+
return 0
|
| 462 |
+
letter_to_rhyming_parts[letter] = common_rhyming_parts
|
| 463 |
+
# Check that 2. The words for different letters do not rhyme
|
| 464 |
+
for a, b in itertools.combinations(letter_to_rhyming_parts, 2):
|
| 465 |
+
# To simplify things, if there are any shared pronunciations between two different letters, we reject it
|
| 466 |
+
if letter_to_rhyming_parts[a] & letter_to_rhyming_parts[b]:
|
| 467 |
+
return 0
|
| 468 |
+
score = 1
|
| 469 |
+
case "poetry_meter":
|
| 470 |
+
all_match = all(is_line_iambic(line) for line in lines)
|
| 471 |
+
score = 1 if all_match else 0
|
| 472 |
+
case _:
|
| 473 |
+
file_logger.warning(f"Unknown constraint type {constraint}")
|
| 474 |
+
score = 0
|
| 475 |
+
|
| 476 |
+
return score
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
class ConstrainedGenerationEval:
|
| 480 |
+
"""
|
| 481 |
+
Constrained generation metric
|
| 482 |
+
"""
|
| 483 |
+
|
| 484 |
+
timeout = 10
|
| 485 |
+
|
| 486 |
+
@classmethod
|
| 487 |
+
def match(cls, response, constraints) -> Number:
|
| 488 |
+
scores = []
|
| 489 |
+
eval_results = {}
|
| 490 |
+
|
| 491 |
+
def handler(signum, frame):
|
| 492 |
+
raise TimeoutError()
|
| 493 |
+
|
| 494 |
+
def check_with_timeout(constraint, constraint_val):
|
| 495 |
+
# Set the signal handler and a timeout
|
| 496 |
+
signal.signal(signal.SIGALRM, handler)
|
| 497 |
+
signal.alarm(cls.timeout) # Set the timeout
|
| 498 |
+
|
| 499 |
+
try:
|
| 500 |
+
# Try to check the constraint
|
| 501 |
+
score = check_constraint(response, constraint, constraint_val)
|
| 502 |
+
except TimeoutError:
|
| 503 |
+
print(f"Timeout reached for constraint: {constraint}")
|
| 504 |
+
score = 0 # Set score to 0 if timeout occurs
|
| 505 |
+
finally:
|
| 506 |
+
signal.alarm(0) # Reset the alarm
|
| 507 |
+
|
| 508 |
+
return score
|
| 509 |
+
|
| 510 |
+
for constraint, constraint_val in constraints.items():
|
| 511 |
+
score = check_with_timeout(constraint, constraint_val)
|
| 512 |
+
scores.append(score)
|
| 513 |
+
eval_results[constraint] = score
|
| 514 |
+
|
| 515 |
+
final_score = min(scores)
|
| 516 |
+
eval_info = "\t".join([f"{key}: {val}" for key, val in eval_results.items()])
|
| 517 |
+
|
| 518 |
+
return final_score, eval_info
|
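The length branch above is easy to smoke-test in isolation. A minimal sketch, assuming the module is importable at the path shown in this diff and that its remaining dependencies (e.g. the pronunciation helpers) are installed; note that ConstrainedGenerationEval relies on signal.SIGALRM and is therefore Unix-only:

# Sketch: exercising the "length" constraint ("<N" / ">N" word-count rules).
from vlmeval.dataset.utils.megabench.scoring.constrained_generation import (
    ConstrainedGenerationEval,
    check_constraint,
)

response = "A short answer with six words."
print(check_constraint(response, "length", "<10"))  # 1: fewer than 10 words
print(check_constraint(response, "length", ">10"))  # 0

# The aggregate metric returns min(per-constraint scores) plus a report string.
score, info = ConstrainedGenerationEval.match(response, {"length": "<10"})
print(score, info)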
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/coordinate_sequence_match.py
ADDED
@@ -0,0 +1,65 @@
import logging

import numpy as np

from .common.conversions import str_to_coords


class CoordsSequenceSimilarity:
    """
    Measure the similarity between two lists of coordinates, used for keypoint estimation tasks.
    """

    @staticmethod
    def compute_score(pred_keypoints, gt_keypoints, k=10):
        """
        Compute the evaluation score for keypoint estimation.

        Args:
            pred_keypoints (list or np.ndarray): List or array of predicted keypoint coordinates,
                each as (x, y), normalized to [0, 1].
            gt_keypoints (list or np.ndarray): List or array of ground truth keypoint coordinates,
                each as (x, y), normalized to [0, 1].
            k (Number): Exponential decay rate; larger values penalize distance more heavily.

        Returns:
            float: A score between 0 and 1, where 1 indicates perfect accuracy
                and 0 indicates completely wrong.
        """
        # Convert inputs to NumPy arrays
        try:
            pred_keypoints = np.array(pred_keypoints)
        except ValueError:
            # The prediction format is not correct
            return 0

        gt_keypoints = np.array(gt_keypoints)

        # Shape mismatch: directly assign a 0 score
        if pred_keypoints.shape != gt_keypoints.shape:
            return 0

        # Compute Euclidean distances between corresponding keypoints
        distances = np.linalg.norm(pred_keypoints - gt_keypoints, axis=1)

        # Maximum possible distance in normalized coordinate space
        max_distance = np.sqrt(2)

        # Normalize distances
        normalized_distances = distances / max_distance

        # Compute per-keypoint scores using exponential decay
        per_keypoint_scores = np.exp(-k * normalized_distances)

        # Average the score across all keypoints
        score = np.mean(per_keypoint_scores)

        return score

    @classmethod
    def match(cls, responses, targets) -> float:
        """Similarity score between targets and responses."""
        logging.debug(f"{responses=}, {targets=}")
        if not isinstance(responses, (tuple | list)):
            responses = str_to_coords(responses, dim=2)
        if not isinstance(targets, (tuple | list)):
            targets = str_to_coords(targets, dim=2)

        return cls.compute_score(responses, targets)
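For intuition about the scoring curve: each keypoint contributes exp(-k * d / sqrt(2)) for a normalized distance d in the unit square, and with the default k=10 that decays quickly. A small self-contained sketch:

import numpy as np

# Per-keypoint score as a function of normalized distance (same formula as above).
def per_keypoint_score(dist, k=10):
    return np.exp(-k * dist / np.sqrt(2))

print(per_keypoint_score(0.0))   # 1.0   exact hit
print(per_keypoint_score(0.05))  # ~0.70 off by 5% of the image
print(per_keypoint_score(0.5))   # ~0.03 off by half the image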
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/dict_jaccard_agg_jaccard.py
ADDED
@@ -0,0 +1,28 @@
from .common.conversions import cast_to_dict
from .jaccard import Jaccard


class DictJaccardAggJaccard:
    """Calculates the mean Jaccard index across dictionary keys.

    1. Calculates the Jaccard index for every key that appears in either
       the predictions or the targets (a missing key counts as an empty set).
    2. Averages these per-key scores over the union of keys.
    """

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return the aggregated Jaccard index between targets and responses."""
        responses = cast_to_dict(responses)
        if not isinstance(responses, dict):
            return 0

        all_keys = set(responses) | set(targets)

        num_keys = 0
        total_score = 0
        for key in all_keys:
            total_score += Jaccard.match(responses.get(key, []), targets.get(key, []))
            num_keys += 1

        return total_score / num_keys
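Concretely, assuming the jaccard_index helper (not shown in this diff) implements the standard |A ∩ B| / |A ∪ B| definition, the per-key averaging works out like this:

# Hand-rolled sketch of the aggregation above, with an assumed standard Jaccard.
def jaccard(a, b):
    a, b = set(a), set(b)
    return len(a & b) / len(a | b) if (a | b) else 1.0

responses = {"fruits": ["apple", "pear"], "cars": ["bmw"]}
targets = {"fruits": ["apple"], "bikes": ["bmx"]}

keys = set(responses) | set(targets)  # fruits, cars, bikes
score = sum(jaccard(responses.get(k, []), targets.get(k, [])) for k in keys) / len(keys)
print(round(score, 3))  # 0.167: (0.5 + 0.0 + 0.0) / 3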
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/dict_set_equality_agg_jaccard.py
ADDED
@@ -0,0 +1,28 @@
from vlmeval.dataset.utils.megabench.scoring.set_equality import SetEquality


class DictSetEqualityAggJaccard:
    """Calculates the average set equality across the dict.

    1. Calculates the set equality for every key that appears in either
       the predictions or the targets (a missing key counts as an empty set).
    2. Averages these per-key scores over the union of keys.
    """

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return the average per-key set equality between targets and responses."""
        if not isinstance(responses, dict):
            return 0

        all_keys = set(responses) | set(targets)

        num_keys = 0
        total_score = 0
        for key in all_keys:
            total_score += SetEquality.match(
                responses.get(key, []), targets.get(key, [])
            )
            num_keys += 1

        return total_score / num_keys
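The set-equality variant is all-or-nothing per key, so partially overlapping sets earn no credit for that key; a quick sketch of the same aggregation:

# Each key contributes 1 only when the two sets match exactly.
responses = {"a": ["x", "y"], "b": ["z"]}
targets = {"a": ["y", "x"], "b": ["z", "w"]}

keys = set(responses) | set(targets)
score = sum(set(responses.get(k, [])) == set(targets.get(k, [])) for k in keys) / len(keys)
print(score)  # 0.5: "a" matches (order ignored), "b" does not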
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/exact_str_match_case_insensitive.py
ADDED
@@ -0,0 +1,12 @@
from .exact_str_match import ExactStrMatch


class ExactStrMatchCaseInsensitive:
    """Case-insensitive exact string matching."""

    @staticmethod
    def match(response, correct_answer) -> int:
        """Case-insensitive exact match between targets and responses."""
        # Both arguments must be strings before lower-casing them
        if not (isinstance(response, str) and isinstance(correct_answer, str)):
            return 0
        return ExactStrMatch.match(response.lower(), correct_answer.lower())
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/geo_proximity.py
ADDED
@@ -0,0 +1,100 @@
import functools
import logging
import math
import random

from geopy.distance import distance
from geopy.extra.rate_limiter import RateLimiter
from geopy.geocoders import Nominatim


USER_AGENT_SUFFIX = hex(random.getrandbits(128))[2:]
geolocator = Nominatim(user_agent=f"vlm-mega-benchmark_{USER_AGENT_SUFFIX}")


error_logger = logging.getLogger("errorLogger")


def calculate_proximity_score(guess_coords, actual_coords, k=100):
    """Calculate the proximity score between two locations.

    The score decreases exponentially with the distance between them.

    Args:
        guess_coords (float, float): The latitude and longitude of the guessed location.
        actual_coords (float, float): The latitude and longitude of the actual location.
        k (numbers.Number): The distance (in km) at which the score decays to 1/e (~0.37).
    """
    dist = distance(guess_coords, actual_coords).km
    proximity_score = math.exp(-dist / k)
    return proximity_score


GEOLOCATION_TIMEOUT = 1
MAX_RETRIES = 30


geocode = RateLimiter(
    geolocator.geocode, min_delay_seconds=GEOLOCATION_TIMEOUT, max_retries=MAX_RETRIES
)


@functools.cache
def try_geolocate(query):
    """Try to look up the location."""
    location = geocode(query)
    if location is None:
        error_logger.error(
            f"Geolocation API request failed due to timeout: exceeded {MAX_RETRIES} retries!"
        )
    return location


def location_to_coords(
    country: str, province_or_state: str, municipality: str
) -> tuple[float, float] | None:
    """Convert the location to latitude and longitude."""
    if country == "" or province_or_state == "" or municipality == "":
        return None
    location = geolocator.geocode(
        query={"country": country, "state": province_or_state, "city": municipality}
    )
    if location is not None:
        return (location.latitude, location.longitude)
    # Try searching without the province/state, as it can be non-standard for some questions
    location = geolocator.geocode(query={"country": country, "city": municipality})
    if location is None:
        return None
    return (location.latitude, location.longitude)


class GeoProximityLocationDict:
    """Return a score based on the distance between two locations."""

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return a score based on how far apart the two locations are,
        where each field is a dict with the following schema:
        {
            country: str,
            province_or_state: str,
            municipality: str
        }
        """
        try:
            guess_coords = location_to_coords(**responses)
        except Exception:
            return 0

        if guess_coords is None:
            error_logger.error(
                f"GeoProximityLocationDict: could not load co-ordinates for {responses=}"
            )
            return 0
        actual_coords = location_to_coords(**targets)
        if actual_coords is None:
            error_logger.error(
                f"GeoProximityLocationDict: could not load co-ordinates for {targets=}"
            )
            return 0

        return calculate_proximity_score(guess_coords, actual_coords)
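Since geopy computes geodesic distance locally, the scoring half of this module can be checked without touching the Nominatim API; with k=100 the score drops to 0.5 at roughly 69 km (100 * ln 2). A sketch with approximate city coordinates:

import math
from geopy.distance import distance  # geodesic distance, no network call

# (lat, lon) ordering, matching what location_to_coords returns.
ottawa = (45.4215, -75.6972)
montreal = (45.5019, -73.5674)

d_km = distance(ottawa, montreal).km    # roughly 160-170 km
print(round(math.exp(-d_km / 100), 2))  # ~0.19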
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/gleu.py
ADDED
@@ -0,0 +1,17 @@
from numbers import Number

import jieba
from nltk.translate.gleu_score import sentence_gleu


class GLEUChinese:
    """Compute the GLEU score for Chinese text."""

    @staticmethod
    def match(response, correct_answer) -> Number:
        """Compute the GLEU score between two strings."""
        if isinstance(response, str) and isinstance(correct_answer, str):
            # The ground-truth answer serves as the reference,
            # the model response as the hypothesis.
            reference_tokens = list(jieba.cut_for_search(correct_answer))
            hypothesis_tokens = list(jieba.cut_for_search(response))
        else:
            return 0
        return sentence_gleu([reference_tokens], hypothesis_tokens)
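A usage sketch, assuming jieba and nltk are installed; jieba's search-mode cut produces overlapping sub-word tokens, which gives the n-gram overlap something to work with on unsegmented Chinese:

from vlmeval.dataset.utils.megabench.scoring.gleu import GLEUChinese

print(GLEUChinese.match("我喜欢自然语言处理", "我喜欢自然语言处理"))  # 1.0 for identical strings
print(GLEUChinese.match("我喜欢机器学习", "我喜欢自然语言处理"))      # partial credit below 1
print(GLEUChinese.match(None, "我喜欢自然语言处理"))                  # 0 for non-string input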
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/jaccard.py
ADDED
@@ -0,0 +1,75 @@
from .common.conversions import cast_to_set
from .common.metrics import jaccard_index


class Jaccard:
    """Calculates the Jaccard index for iterables."""

    @classmethod
    def match(cls, responses, targets) -> float:
        """Jaccard index between targets and responses."""
        if responses is None:
            return 0
        responses = cast_to_set(responses)
        targets = cast_to_set(targets)

        return jaccard_index(responses, targets)


def _normalize(item: str) -> str:
    """Lower-case a string and strip whitespace and common separators."""
    item = item.lower()
    for ch in (" ", "-", "\n", "\t", "_", "."):
        item = item.replace(ch, "")
    return item


class JaccardCaseInsensitive:
    """Calculates the Jaccard index for iterables of strings,
    ignoring case.
    """

    @classmethod
    def match(cls, responses, targets) -> float:
        """Case-insensitive Jaccard index between targets and responses."""
        if responses is None:
            return 0
        responses = cast_to_set(responses)
        targets = cast_to_set(targets)

        if isinstance(list(targets)[0], str):
            new_responses = {
                item.lower() if isinstance(item, str) else str(item).lower()
                for item in responses
            }
            new_targets = {item.lower() for item in targets}
        elif isinstance(list(targets)[0], tuple):
            new_responses = set()
            new_targets = set()
            try:
                for res in responses:
                    new_responses.add(tuple(_normalize(item) for item in res))
            except Exception:
                # The data type of the response might be wrong; return 0 in this case
                return 0
            for tgt in targets:
                new_targets.add(tuple(_normalize(item) for item in tgt))
        else:
            return 0

        return jaccard_index(new_responses, new_targets)
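The tuple branch strips separators before comparing, so differently formatted renderings of the same tuple collapse to one canonical form; a self-contained sketch of that normalization:

# These all normalize to ("newyork", "usa") under the _normalize helper above.
def normalize_tuple(t):
    out = []
    for item in t:
        item = item.lower()
        for ch in (" ", "-", "\n", "\t", "_", "."):
            item = item.replace(ch, "")
        out.append(item)
    return tuple(out)

examples = [("New York", "USA"), ("new-york", "usa"), ("new_york\n", "USA.")]
print({normalize_tuple(t) for t in examples})  # {('newyork', 'usa')}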
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/latex_expr_equality.py
ADDED
@@ -0,0 +1,96 @@
import re
import signal

from sympy.core.sympify import SympifyError
from sympy.parsing.latex import parse_latex
from sympy.parsing.latex.errors import LaTeXParsingError

from .common.transformations import normalize_latex
from .simple_str_match import SimpleStrMatch


class TimeoutException(Exception):
    pass


def timeout_handler(signum, frame):
    raise TimeoutException()


class LatexExprEquality:
    """Determines if two LaTeX expressions are equal."""

    @classmethod
    def match(cls, responses, targets, timeout_duration=15) -> int:
        """Whether two LaTeX expressions are equal."""
        if not isinstance(responses, str) or not isinstance(targets, str):
            return 0
        signal.signal(signal.SIGALRM, timeout_handler)
        signal.alarm(timeout_duration)  # Set the timeout duration in seconds
        try:
            # The symbolic equality check can hang on pathological inputs,
            # hence the alarm-based timeout.
            responses = normalize_latex(responses)
            targets = normalize_latex(targets)
            responses_expr = parse_latex(responses)
            targets_expr = parse_latex(targets)
            result = 1 if responses_expr.equals(targets_expr) else 0
            return result
        except (
            LaTeXParsingError,
            SympifyError,
            TypeError,
            TimeoutException,
            NotImplementedError,
        ):
            # Fall back to plain string matching when symbolic comparison fails
            return SimpleStrMatch.match(responses, targets)
        finally:
            signal.alarm(0)  # Cancel the alarm once the comparison finishes


def separate_text_and_latex(text):
    # Regular expression to match LaTeX content between $ symbols
    pattern = r"(\$[^$]*\$)"

    # Split the text based on the LaTeX parts
    parts = re.split(pattern, text)

    # Separate plain text and LaTeX
    latex_content = []
    plain_text = []

    for part in parts:
        if part.startswith("$") and part.endswith("$"):
            latex_content.append(part)
        else:
            plain_text.append(part.strip())

    return plain_text, latex_content


def join_latex(latex_exps):
    result = []
    for exp in latex_exps:
        result.append(exp[1:-1].strip().replace(",", ""))
    return " ".join(result)


class TextLatexExprEquality:
    """Determines if two strings that mix plain text and LaTeX are equal."""

    @classmethod
    def match(cls, responses, targets) -> int:
        """Whether the text parts and the LaTeX parts are both equal."""
        if not isinstance(responses, str) or not isinstance(targets, str):
            return 0

        tgt_texts, tgt_latex = separate_text_and_latex(targets)
        res_texts, res_latex = separate_text_and_latex(responses)

        res_text_join = "".join(res_texts).replace(",", "")
        tgt_text_join = "".join(tgt_texts).replace(",", "")
        text_match = SimpleStrMatch.match(res_text_join, tgt_text_join)

        res_latex_join = join_latex(res_latex)
        tgt_latex_join = join_latex(tgt_latex)
        latex_match = LatexExprEquality.match(res_latex_join, tgt_latex_join)

        return 1 if text_match and latex_match else 0
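separate_text_and_latex and join_latex are plain regex/string utilities and can be tried standalone:

import re

text = "The area is $\\pi r^2$ for radius $r$."
parts = re.split(r"(\$[^$]*\$)", text)
print(parts)  # ['The area is ', '$\\pi r^2$', ' for radius ', '$r$', '.']

latex = [p for p in parts if p.startswith("$") and p.endswith("$")]
print(" ".join(p[1:-1].strip().replace(",", "") for p in latex))  # \pi r^2 r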
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/program_judge.py
ADDED
@@ -0,0 +1,141 @@
import io
import json
import multiprocessing
import pathlib
from multiprocessing.queues import Empty
from unittest.mock import patch

BIG_BENCH_PATH = pathlib.Path(__file__).resolve().parent.parent.parent


class ProgramJudge:
    """Program judging."""

    # Track whether results have been saved for this metric instance,
    # to prevent saving duplicate results
    task_saved = {}

    @classmethod
    def save_test_results(cls, task_name, results, query_file):
        query_base = pathlib.Path(query_file).parent
        output_dir = query_base / "code_eval"
        output_dir.mkdir(parents=True, exist_ok=True)
        output_file = output_dir / f"{task_name}_test_case.json"

        saved = cls.task_saved.get(task_name, False)
        if output_file.is_file() and saved:
            with open(output_file, "r") as f:
                existing_data = json.load(f)
            existing_data.extend(results)
        else:
            existing_data = results
            cls.task_saved[task_name] = True
        with open(output_file, "w") as f:
            json.dump(existing_data, f, indent=4)

    @staticmethod
    def match(response: str, eval_context: dict, task_info: dict = None) -> int:
        # task_name = task_info["task_name"]
        # task_folder = task_info["task_folder"]
        # query_results_file = task_info["results_file"]

        test_cases = eval_context["test_case"]

        # Create a CodeTester instance with the response and the given test cases
        tester = CodeTester(response, test_cases)
        score, results = tester.run_tests()

        # ProgramJudge.save_test_results(task_name, results, query_results_file)
        return score


#########################################################
### Implementation of the automatic code tester
#########################################################


class CodeTester:
    def __init__(self, user_code, test_cases, timeout=2, verbose=True):
        self.user_code = user_code
        self.test_cases = test_cases
        self.timeout = timeout
        self.verbose = verbose

    def run_user_code(self, input_data):
        input_str = "\n".join(input_data) + "\n"
        output_queue = multiprocessing.Queue()
        process = multiprocessing.Process(
            target=self.target, args=(output_queue, input_str)
        )
        process.start()

        process.join(self.timeout)
        if process.is_alive():
            process.terminate()
            return "ERROR: Code execution exceeded the time limit."

        try:
            result = output_queue.get(timeout=10)  # Add a timeout for queue retrieval
        except Empty:
            return "ERROR: No output was produced before timeout."
        finally:
            output_queue.close()  # Close the queue to release resources
            output_queue.join_thread()  # Ensure all items in the queue are processed

        return result

    def target(self, output_queue, input_str):
        contains_main_block = 'if __name__ == "__main__":' in self.user_code
        stdout = io.StringIO()
        try:
            with patch("builtins.input", side_effect=input_str.splitlines()):
                with patch("sys.stdout", new=stdout):
                    if contains_main_block:
                        # If the user code contains a main block, execute it
                        # in the context of __name__ == "__main__"
                        exec(self.user_code, {"__name__": "__main__"})
                    else:
                        # Otherwise, just execute the user code directly
                        exec(self.user_code)
        except Exception as e:
            output_queue.put(f"ERROR during execution: {e}")
        else:
            output_queue.put(stdout.getvalue().rstrip())

    def evaluate_test_case(self, input_data, expected_output):
        output = self.run_user_code(input_data)
        return output == expected_output.rstrip(), output

    def run_tests(self):
        if isinstance(self.test_cases, dict):
            self.test_cases = [self.test_cases]
        total_tests = len(self.test_cases)
        passed_tests = 0
        results = []

        for i, test_case in enumerate(self.test_cases, 1):
            result, output = self.evaluate_test_case(
                test_case["input"], test_case["expected"]
            )

            test_result = {
                "response": self.user_code,
                "test_case": test_case["input"],
                "output": output,
                "expected": test_case["expected"],
                "result": "Passed" if result else "Failed",
            }
            results.append(test_result)

            if result:
                if self.verbose:
                    print(f"Test case {i}: Passed")
                passed_tests += 1
            else:
                if self.verbose:
                    print(
                        f"Test case {i}: Failed - Expected {test_case['expected']} but got {output}"
                    )

        score = passed_tests / total_tests if total_tests > 0 else 0
        return score, results
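CodeTester is self-contained enough to exercise directly. A minimal sketch; the __main__ guard matters on platforms that spawn subprocesses (macOS, Windows):

from vlmeval.dataset.utils.megabench.scoring.program_judge import CodeTester

if __name__ == "__main__":
    user_code = "n = int(input())\nprint(n * 2)"
    test_cases = [
        {"input": ["3"], "expected": "6"},
        {"input": ["10"], "expected": "20"},
    ]
    tester = CodeTester(user_code, test_cases, verbose=False)
    score, results = tester.run_tests()
    print(score)  # 1.0 when both cases pass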
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/sequence_equality.py
ADDED
@@ -0,0 +1,62 @@
from numbers import Number

from .common.conversions import str_to_list


class SequenceEquality:
    """Determines whether two sequences are exactly equal."""

    @classmethod
    def match(cls, responses, targets) -> int:
        """Exact match between targets and responses."""
        if not isinstance(responses, str):
            responses = str(responses)
        responses = str_to_list(responses)
        targets = str_to_list(targets)
        return 1 if responses == targets else 0


class SequenceEqualityCaseInsensitive:
    """Determines whether two sequences are exactly equal, ignoring string case."""

    @classmethod
    def match(cls, responses, targets) -> int:
        """Case-insensitive exact match between targets and responses."""
        if not isinstance(responses, str):
            responses = str(responses)
        responses = str_to_list(responses)
        targets = str_to_list(targets)

        responses = [
            item.lower() if isinstance(item, str) else str(item) for item in responses
        ]
        targets = [item.lower() for item in targets]
        return 1 if responses == targets else 0


class SequenceAccuracyCaseInsensitive:
    """Determines how much of the sequence was predicted correctly,
    element by element and ignoring string case.
    """

    @classmethod
    def match(cls, responses, targets) -> int:
        """Element-wise accuracy between targets and responses."""
        responses = str_to_list(responses)
        targets = str_to_list(targets)
        if len(targets) != len(responses):
            return 0
        correct = 0
        for res, tgt in zip(responses, targets):
            if isinstance(tgt, str):
                if res.lower() == tgt.lower():
                    correct += 1
            elif isinstance(tgt, Number) and isinstance(res, Number):
                if res == tgt:
                    correct += 1
        return correct / len(targets)
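Inputs go through str_to_list (defined in common.conversions, not shown in this diff), so both Python lists and their string renderings are accepted, assuming that helper parses bracketed strings. The accuracy variant scores element by element:

# Element-wise sketch of SequenceAccuracyCaseInsensitive, with the parsing
# step replaced by ready-made lists for illustration.
responses = ["Red", "green", "BLUE", "cyan"]
targets = ["red", "green", "blue", "yellow"]

correct = sum(r.lower() == t.lower() for r, t in zip(responses, targets))
print(correct / len(targets))  # 0.75: three of four positions match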
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/set_equality.py
ADDED
@@ -0,0 +1,74 @@
from .common.conversions import cast_to_set, str_to_set


def _convert_to_hashable(item):
    """Convert unhashable types into hashable ones."""
    if isinstance(item, (list, tuple)):
        return tuple(item)  # Convert lists to tuples
    return item


class SetEquality:
    """Determines whether two sets are equal."""

    @classmethod
    def match(cls, responses, targets) -> int:
        """Exact match between targets and responses."""
        if isinstance(responses, (list, tuple)):
            responses = {_convert_to_hashable(item) for item in responses}
        if isinstance(targets, (list, tuple)):
            targets = {_convert_to_hashable(item) for item in targets}
        return 1 if responses == targets else 0


class SetEqualityCaseInsensitive:
    """Determines whether two sets are equal, ignoring string case."""

    @classmethod
    def match(cls, responses, targets) -> int:
        """Exact match between targets and responses."""
        try:
            responses: set[str] = {text.upper() for text in cast_to_set(responses)}
            targets: set[str] = {text.upper() for text in cast_to_set(targets)}
        except AttributeError:
            return 0
        return 1 if responses == targets else 0


class StringSetEqualityLineSplit:
    """Determines whether two sets are equal, for string inputs separated by line breaks."""

    @classmethod
    def match(cls, responses, targets) -> int:
        if "\\n" in targets:
            targets = targets.replace("\\n", "\n")
        if "\\n" in responses:
            responses = responses.replace("\\n", "\n")
        responses_set = set(responses.split("\n"))
        targets_set = set(targets.split("\n"))
        responses_set = {
            item.lower() if isinstance(item, str) else item for item in responses_set
        }
        targets_set = {
            item.lower() if isinstance(item, str) else item for item in targets_set
        }
        return 1 if responses_set == targets_set else 0


class StringSetEqualityCommaSplit:
    """Determines whether two sets are equal, for string inputs separated by commas.

    Handles corner cases that fail the general SetEquality metric, such as the
    literal string "None". The comparison is case-insensitive.
    """

    @classmethod
    def match(cls, responses, targets) -> int:
        responses_set = str_to_set(responses)
        targets_set = str_to_set(targets)
        responses_set = {
            item.lower() if isinstance(item, str) else item for item in responses_set
        }
        targets_set = {
            item.lower() if isinstance(item, str) else item for item in targets_set
        }
        return 1 if responses_set == targets_set else 0
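SetEquality itself needs no helpers beyond the tuple conversion, so ordering and list-vs-tuple nesting are both forgiven; a quick sketch (the import path follows the diff layout):

from vlmeval.dataset.utils.megabench.scoring.set_equality import SetEquality

# Inner lists are hashed as tuples, and set comparison ignores ordering.
print(SetEquality.match([["a", "b"], "c"], [("a", "b"), "c"]))  # 1
print(SetEquality.match(["c", ["a", "b"]], [("a", "b"), "c"]))  # 1
print(SetEquality.match(["a", "b"], ["a"]))                     # 0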
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/xml_norm_point_in_bbox.py
ADDED
@@ -0,0 +1,35 @@
from .common.conversions import parse_point_2d_from_xml, str_to_bboxes


class XmlNormPointInBbox:
    """Determines whether a point is located in a bounding box.

    Assumes that co-ordinates are normalized between 0 and 1 and that the 2D point is
    of the form <point>x, y</point>
    """

    @classmethod
    def match(cls, responses, eval_context) -> tuple[int, dict]:
        """Determine if the point is in a bounding box,
        and return which bounding box was matched, if any."""
        bounding_box_has_match = {
            bbox: False for bbox in eval_context["bounding_boxes"]
        }
        bounding_boxes = [
            str_to_bboxes(bbox_str)[0] for bbox_str in eval_context["bounding_boxes"]
        ]
        assert bounding_boxes

        if not isinstance(responses, (tuple | list)):
            responses = parse_point_2d_from_xml(responses)
        if not responses:
            return 0, bounding_box_has_match
        elif len(responses) != 2:
            return 0, bounding_box_has_match

        x, y = responses
        for min_x, min_y, max_x, max_y in bounding_boxes:
            if min_x <= x <= max_x and min_y <= y <= max_y:
                bounding_box_has_match[str((min_x, min_y, max_x, max_y))] = True
                return 1, bounding_box_has_match
        return 0, bounding_box_has_match
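The XML and bbox parsing helpers live in common.conversions and are not shown in this diff; the containment test itself is a plain interval check, sketched below with the parsing replaced by literals:

# Containment sketch: normalized (x, y) against (min_x, min_y, max_x, max_y) boxes.
point = (0.42, 0.58)
bboxes = [(0.1, 0.1, 0.3, 0.3), (0.4, 0.5, 0.6, 0.7)]

x, y = point
hits = [b for b in bboxes if b[0] <= x <= b[2] and b[1] <= y <= b[3]]
print(bool(hits), hits)  # True [(0.4, 0.5, 0.6, 0.7)]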
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/analysis_utils.py
ADDED
@@ -0,0 +1,182 @@
import ast
import json
from collections import defaultdict
from typing import Any, Dict, List

_DATASET_CACHE = {}
_SCORING_FUNCTIONS_CACHE = {}


def _load_hf(subset_name: str) -> Dict[str, List[Dict[str, Any]]]:
    """
    Load the HF dataset for the given subset name, grouped by task name.
    """
    if subset_name in _DATASET_CACHE:
        return _DATASET_CACHE[subset_name]

    from datasets import load_dataset
    dataset = load_dataset("TIGER-Lab/MEGA-Bench", subset_name)["test"]
    task_dict = {}
    for sample in dataset:
        task_name = sample["task_name"]
        if task_name not in task_dict:
            task_dict[task_name] = []
        task_dict[task_name].append(sample)

    _DATASET_CACHE[subset_name] = task_dict
    return task_dict


def _get_scoring_functions():
    if _SCORING_FUNCTIONS_CACHE:
        return _SCORING_FUNCTIONS_CACHE

    core_data = _load_hf("core")
    open_data = _load_hf("open")

    core_scoring_functions = {}
    open_scoring_functions = {}

    for task_name, task_samples in core_data.items():
        core_scoring_functions[task_name] = ast.literal_eval(
            task_samples[0]["metric_info"]
        )

    for task_name, task_samples in open_data.items():
        open_scoring_functions[task_name] = ast.literal_eval(
            task_samples[0]["metric_info"]
        )

    _SCORING_FUNCTIONS_CACHE["core"] = core_scoring_functions
    _SCORING_FUNCTIONS_CACHE["open"] = open_scoring_functions

    return _SCORING_FUNCTIONS_CACHE


def _determine_eval_style(task):
    """
    Determine the evaluation style ("rule" or "llm") for a task.
    """
    scoring_functions = _get_scoring_functions()
    core_scoring_functions = scoring_functions["core"]
    open_scoring_functions = scoring_functions["open"]

    task_name = task["task_name"]
    if task_name in core_scoring_functions:
        metric_info = core_scoring_functions[task_name]
    elif task_name in open_scoring_functions:
        metric_info = open_scoring_functions[task_name]
    else:
        raise ValueError(f"Task '{task_name}' not found in either core or open datasets")

    all_task_metrics = list(metric_info["field_score_function"].values())
    eval_type = (
        "rule"
        if (
            "gpt_4o_as_judge" not in all_task_metrics
            and "ascii_art_gpt4o_judge" not in all_task_metrics
        )
        else "llm"
    )
    return eval_type


def clear_cache():
    """
    Clear the cache and force re-loading the dataset.
    """
    _DATASET_CACHE.clear()
    _SCORING_FUNCTIONS_CACHE.clear()


def task_list_refine(task_list):
    task_results = []
    for task in task_list:
        if "mean_task_score" in task and task["mean_task_score"] != -1:
            num_demo = 1 if len(task["example_contents"]) > 0 else 0
            task_results.append(
                {
                    "name": task["task_name"],
                    "score": task["mean_task_score"],
                    "eval_type": task.get("eval_type", _determine_eval_style(task)),
                    "num_demo": num_demo,
                    "num_query": len(task["query_response"]),
                }
            )
    return task_results


def derive_keyword_stats(task_results_with_meta, include_per_task_info=False):
    """
    Calculate keyword-based statistics for skills, input_format, and output_format.
    """
    def _new_stats():
        return defaultdict(
            lambda: {"count": 0, "total_score": 0.0, "num_samples": 0, "tasks": []}
        )

    skills_stats = _new_stats()
    input_format_stats = _new_stats()
    output_format_stats = _new_stats()
    input_num_stats = _new_stats()
    app_stats = _new_stats()

    for task in task_results_with_meta.values():
        task_name = task.get("original_task_name", "Unknown Task")
        score = task.get("score", 0.0)
        num_samples = task.get("num_query", 0) + task.get("num_demo", 0)

        if score == -1:
            continue

        for skill in task.get("skills", []):
            skills_stats[skill]["count"] += 1
            skills_stats[skill]["total_score"] += score
            skills_stats[skill]["num_samples"] += num_samples
            if include_per_task_info:
                skills_stats[skill]["tasks"].append((task_name, score))

        for stat_dict, key in [
            (input_format_stats, "input_format"),
            (output_format_stats, "output_format"),
            (input_num_stats, "num_input"),
            (app_stats, "app"),
        ]:
            if value := task.get(key):
                stat_dict[value]["count"] += 1
                stat_dict[value]["total_score"] += score
                stat_dict[value]["num_samples"] += num_samples
                if include_per_task_info:
                    stat_dict[value]["tasks"].append((task_name, score))

    all_stats = {
        "skills": skills_stats,
        "input_format": input_format_stats,
        "output_format": output_format_stats,
        "input_num": input_num_stats,
        "app": app_stats,
    }

    for stats_dict in all_stats.values():
        for data in stats_dict.values():
            data["average_score"] = data["total_score"] / data["count"] if data["count"] > 0 else 0.0
            del data["total_score"]

    return dict(all_stats)


def collect_task_metadata(model_results, all_task_meta_path):
    """
    Collect task metadata for a model's results using the all_task_meta.json file.
    """
    # Load the complete task metadata
    with open(all_task_meta_path, "r") as f:
        all_meta = json.load(f)

    # Create the result dictionary
    all_task_meta = {}

    # Match results with metadata
    for task_result in model_results:
        task_name = task_result["name"]
        if task_name in all_meta:
            meta = all_meta[task_name].copy()  # Copy to avoid modifying the original
            meta.update(task_result)
            all_task_meta[task_name] = meta

    return all_task_meta
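derive_keyword_stats is a pure function over the metadata dict, so it can be smoke-tested without downloading the HF dataset. A sketch with one hypothetical task record (field names mirror the accesses above; run from the tools/ directory, since the sibling scripts import this module flat):

from analysis_utils import derive_keyword_stats

meta = {
    "demo_task": {
        "original_task_name": "demo_task",
        "score": 0.8,
        "num_query": 10,
        "num_demo": 1,
        "skills": ["OCR"],
        "input_format": "single image",
        "output_format": "exact answer",
        "num_input": "1-image",
        "app": "information extraction",
    }
}
stats = derive_keyword_stats(meta)
print(stats["skills"]["OCR"])
# {'count': 1, 'num_samples': 11, 'tasks': [], 'average_score': 0.8}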
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/derive_breakdown_results.py
ADDED
@@ -0,0 +1,141 @@
import argparse
import json
from pathlib import Path

from analysis_utils import (
    task_list_refine,
    collect_task_metadata,
    derive_keyword_stats,
)


def calculate_model_summary(task_results_with_meta):
    """
    Re-calculate model performance summary statistics across core and open tasks.

    Args:
        task_results_with_meta: Dict of task results (with metadata), including
            the score and eval type of each task

    Returns:
        Dictionary containing summary statistics for core and open tasks
    """
    core_tasks = []
    open_tasks = []

    # Separate core and open tasks
    for task in task_results_with_meta.values():
        if task['eval_type'] == 'llm':
            open_tasks.append(task)
        else:
            core_tasks.append(task)

    def calculate_stats(tasks):
        if not tasks:
            return None

        total_samples = sum(task.get('num_query', 0) for task in tasks)
        macro_scores = [task.get('score', 0) for task in tasks]

        return {
            "num_eval_tasks": len(tasks),
            "num_eval_samples": total_samples,
            "macro_mean_score": sum(macro_scores) / len(tasks),
        }

    core_stats = calculate_stats(core_tasks)
    open_stats = calculate_stats(open_tasks)

    # Calculate the overall score (weighted average based on the number of tasks);
    # guard against a missing split so we never index into None
    if core_stats and open_stats:
        total_tasks = core_stats["num_eval_tasks"] + open_stats["num_eval_tasks"]
        overall_score = (
            core_stats["macro_mean_score"] * core_stats["num_eval_tasks"]
            + open_stats["macro_mean_score"] * open_stats["num_eval_tasks"]
        ) / total_tasks
    else:
        overall_score = 0

    return {
        "core": core_stats,
        "open": open_stats,
        "overall_score": overall_score
    }


def merge_json_files(input_dir, output_path, key="name"):
    """
    Merge multiple JSON files containing evaluation results from a directory.
    Looks for all files matching the pattern 'megabench_score*.json'.
    Prioritizes LLM evaluations over rule-based ones when duplicates exist.
    """
    data_dict = {}  # Using the task name as key for easy lookup and updates

    # Find all matching JSON files in the directory
    json_paths = list(Path(input_dir).glob("megabench_score*.json"))
    print(f"Found {len(json_paths)} files to merge")

    # Load and merge all JSON files
    for path in json_paths:
        print(f"Processing {path}")
        with open(path, "r") as f:
            data = json.load(f)
            if isinstance(data, dict) and "data" in data:
                data = task_list_refine(data["data"])

        # Update or add entries
        for item in data:
            item_key = item[key]
            # Add if new, or overwrite if the new item is LLM-evaluated (prioritize LLM eval)
            if item_key not in data_dict or (
                item.get("eval_type") == "llm" and data_dict[item_key].get("eval_type") != "llm"
            ):
                data_dict[item_key] = item

    # Convert back to a list
    merged_data = list(data_dict.values())

    # Save the merged result
    output_path.parent.mkdir(parents=True, exist_ok=True)
    with open(output_path, "w") as f:
        json.dump(merged_data, f, indent=4)

    print(f"Merged file with {len(merged_data)} tasks saved to {output_path}")
    return merged_data


def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='Merge and process evaluation score files.')
    parser.add_argument('--input_dir', type=str, help='Directory containing score files')
    args = parser.parse_args()

    # Convert the path to a Path object
    input_dir = Path(args.input_dir)

    # Create an analysis directory under the input directory
    output_dir = input_dir / "analysis"
    output_dir.mkdir(parents=True, exist_ok=True)

    # Merge files
    output_path = output_dir / "task_results.json"
    task_results = merge_json_files(input_dir, output_path)

    # Collect metadata and derive keyword stats
    task_results_with_meta = collect_task_metadata(task_results, all_task_meta_path="all_task_meta.json")
    keyword_stats = derive_keyword_stats(task_results_with_meta)

    # Calculate the model summary
    model_summary = calculate_model_summary(task_results_with_meta)

    summary_results = {
        "model_summary": model_summary,
        "keyword_stats": keyword_stats
    }

    # Save keyword stats
    stats_output = output_dir / "summary_and_keyword_stats.json"
    with open(stats_output, "w") as f:
        json.dump(summary_results, f, indent=4)

    print(f"\nResults saved in {output_dir}:")
    print(f"- Merged data: {output_path}")
    print(f"- Multi-dimensional keyword stats: {stats_output}")


if __name__ == "__main__":
    main()
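The one subtle part of the merge is the duplicate-resolution rule: an LLM-judged entry always replaces a rule-judged one for the same task, never the reverse. The rule in isolation:

# Duplicate resolution: LLM-evaluated entries win over rule-based ones.
data_dict = {"taskA": {"name": "taskA", "eval_type": "rule", "score": 0.4}}
item = {"name": "taskA", "eval_type": "llm", "score": 0.6}

if item["name"] not in data_dict or (
    item.get("eval_type") == "llm" and data_dict[item["name"]].get("eval_type") != "llm"
):
    data_dict[item["name"]] = item

print(data_dict["taskA"]["eval_type"], data_dict["taskA"]["score"])  # llm 0.6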
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/get_si_subset_from_full.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
For proprietary models that natively support multi-image or video inputs, we don't run the single-image setting;
instead, we compute the SI results directly by extracting the subset results from the full task set to compute the stats.
"""


from pathlib import Path
import json
import argparse
from analysis_utils import (
    derive_keyword_stats,
    collect_task_metadata
)
from derive_breakdown_results import calculate_model_summary


def process_subset_results(input_dir, eval_type):
    """Read results from the full results directory structure"""
    task_results_path = input_dir / "analysis" / "task_results.json"

    # Load task results
    with open(task_results_path, "r") as f:
        task_results = json.load(f)

    results_with_meta = collect_task_metadata(task_results, all_task_meta_path="all_task_meta.json")

    # Filter tasks by eval_type
    filtered_results_with_meta = {
        task_name: task for task_name, task in results_with_meta.items()
        if task["eval_type"] == eval_type and task["num_input"] == "1-image"
    }
    filtered_results = [task for task in task_results if task["name"] in filtered_results_with_meta]

    if not filtered_results:
        print(f"Warning: No tasks found in {input_dir} with eval_type {eval_type}")
        return None, None, None

    # Calculate summary statistics
    num_tasks = len(filtered_results)
    total_queries = sum(task["num_query"] for task in filtered_results)
    total_correct = sum(round(task["score"] * task["num_query"]) for task in filtered_results)

    summary = {
        "num_eval_tasks": num_tasks,
        "num_eval_samples": total_queries,
        "macro_mean_score": sum(task["score"] for task in filtered_results) / num_tasks,
    }

    return filtered_results, filtered_results_with_meta, summary


def main(input_dir, output_dir):
    # Process core and open set results
    filtered_tasks_core, filtered_tasks_core_with_meta, _ = process_subset_results(input_dir, "rule")
    filtered_tasks_open, filtered_tasks_open_with_meta, _ = process_subset_results(input_dir, "llm")

    if filtered_tasks_core and filtered_tasks_open:
        task_results = filtered_tasks_core + filtered_tasks_open
        task_results_with_meta = {**filtered_tasks_core_with_meta, **filtered_tasks_open_with_meta}

        # Save task results
        with open(output_dir / "task_results.json", "w") as f:
            json.dump(task_results, f, indent=4)

        # Collect metadata and derive keyword stats
        keyword_stats = derive_keyword_stats(task_results_with_meta)

        # Calculate model summary
        model_summary = calculate_model_summary(task_results_with_meta)

        summary_results = {
            "model_summary": model_summary,
            "keyword_stats": keyword_stats
        }

        # Save the summary and keyword stats
        stats_output = output_dir / "summary_and_keyword_stats.json"
        with open(stats_output, "w") as f:
            json.dump(summary_results, f, indent=4)

        print(f"\nResults saved in {output_dir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_dir", type=str, required=True, help="Path to the input directory containing full results")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to the output directory")
    args = parser.parse_args()

    input_dir = Path(args.input_dir)
    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    main(input_dir, output_dir)
VLMEvalKit-sudoku/vlmeval/dataset/utils/mmdu.py
ADDED
@@ -0,0 +1,126 @@
from ...smp import *

meta_prompt = """
You are an assistant skilled at evaluating the quality of creative text.
Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to \
the user question displayed below. You'll need to assess the response on the following dimensions: \
Creativity, Richness, Visual Perception, Logical Coherence, Answer Accuracy and Image Relationship Understanding. \
We will provide you with a creative question and the AI model's response and a reference answer for your evaluation. \
As you begin your assessment, follow this process:
1. Evaluate the AI model's answers on different dimensions, pointing out its strengths or weaknesses \
in each dimension and assigning a score of 1 to 10 for each.
2. Finally, based on the assessments across dimensions, \
provide an overall score of 1 to 10 for the AI model's response.
3. Your scoring should be as stringent as possible and follow the scoring rules below:
In general, the higher the quality of the model's response and its strict adherence to user needs, \
the higher the score. Responses that do not meet user needs will receive lower scores.
Scoring rules:
Creativity:
Scores 1-2 when there is no innovation or uniqueness in the content.
Scores 3-4 when providing partially original content but with low creative quality.
Scores 5-6 when mostly creative but lacks significant novelty, with moderate quality.
Scores 7-8 when having novelty and high-quality content.
Scores 9-10 when highly novel and of exceptional quality compared to the reference answer.
Richness:
Scores 1-2 when lacking depth and breadth, with very limited information.
Scores 3-4 when limited in depth and breadth, with fewer explanations and examples, showing low diversity.
Scores 5-6 when limited in depth and breadth but provides basic necessary information.
Scores 7-8 when providing depth and useful additional information.
Scores 9-10 when providing exceptional depth, breadth, and high diversity compared to the reference answer.
Visual Perception:
Scores 1-2 when the description of the visual information in the image contains errors or \
is significantly inconsistent with the content of the image.
Scores 3-4 When the description of the visual information in the image reflects only a small amount \
of the image's information and contains some errors.
Scores 5-6 when the description of the visual information in the image includes the basic information \
of the image but contains minimal information.
Scores 7-8 when the description of the visual information in the image matches the image well and is rich in content, \
providing a substantial amount of information about the image.
Scores 9-10 when the description of the visual information in the image not only matches the image \
but also is more detailed and informative compared to the reference answer, providing more information about the image.
Logical Coherence:
Scores 1-2 when entirely incoherent, lacking any logic, and not matching the question or known information.
Scores 3-4 when somewhat coherent but with many logical errors or inconsistencies.
Scores 5-6 when mostly coherent, with few errors, but may struggle to maintain complete coherence in complex situations.
Scores 7-8 when excellent logical handling, very few errors.
Scores 9-10 when flawless logic, impeccable in handling complexity, \
and significantly higher logical coherence compared to the reference answer.
Answer Accuracy:
Scores 1-2 when the answer is significantly inconsistent with the question or contains obvious errors.
Scores 3-4 when the answer is partially correct but contains some errors or is incomplete.
Scores 5-6 when the answer is basically correct but lacks details or is not sufficiently detailed.
Scores 7-8 when the answer is accurate and detailed, fully corresponding to the question.
Scores 9-10 when the answer is not only accurate and detailed but also provides additional useful information, \
exceeding expectations.
Image Relationship Understanding:
Scores 1-2 when there are significant errors or confusion in distinguishing and describing different images, \
unable to correctly identify and relate the content of the images.
Scores 3-4 when the description of different images reflects only minimal distinguishing information, \
contains some errors and confusion, and fails to clearly differentiate and relate the images.
Scores 5-6 when the description of different images includes basic distinguishing information, \
is able to correctly identify and relate the images in a basic manner, \
but the information provided is minimal and lacks detail.
Scores 7-8 when the description of different images is accurate and detailed, \
clearly distinguishing and relating the images, \
with rich content that points out the main commonalities and differences between the images.
Scores 9-10 when the description of different images is not only accurate and detailed but also \
provides richer information and analysis, clearly distinguishing and relating the images, \
more comprehensively pointing out the commonalities and differences \
between the images compared to the reference answer.
Overall Score:
Scores 1-2 when irrelevant to the question, factually incorrect, or generates harmful content.
Scores 3-4 when no serious errors, mostly harmless, but of low quality and does not meet requirements.
Scores 5-6 when basically meeting requirements but performing poorly in some dimensions, with moderate quality.
Scores 7-8 when performing well in all dimensions.
Scores 9-10 when fully addressing user questions and all requirements, significantly surpassing the reference answer.
Please remember, you must evaluate and explain before scoring. After your explanation for each dimension, \
add the score for that dimension. Finally, at the end of your response, \
in the format of the dictionary (including brackets), return all your scoring results, \
ensuring your scores are integers:
{'Dimension One': Score, 'Dimension Two': Score, ..., 'Overall Score': Score}, \
for example: {'Creativity': 9, 'Richness': 6, ..., 'Overall Score': 7}.\n
"""
question_begin_prompt = '[Question]'
reference_begin_prompt = '[The Start of Reference Answer]'
reference_end_prompt = '[The End of Reference Answer]'
answers_begin_prompt = '[The Start of Assistant’s Answer]'
answers_end_prompt = '[The End of Assistant’s Answer]'


def mmdu_score(model, line):
    question = eval(line['question'])
    gt = eval(line['answer'])
    prediction = eval(line['prediction'])

    DIMS = [
        'Creativity', 'Richness', 'Visual Perception', 'Logical Coherence',
        'Answer Accuracy', 'Image Relationship Understanding', 'Overall Score'
    ]

    all_result_dict = []
    logs = []
    for j in range(len(question)):
        try:
            prompt = meta_prompt + question_begin_prompt + '\n' + question[j] + '\n\n' + \
                reference_begin_prompt + '\n' + gt[j] + '\n' + reference_end_prompt + '\n\n' + \
                answers_begin_prompt + '\n' + prediction[j] + '\n' + answers_end_prompt
            response = model.generate(prompt)
            start_index = response.find('{')
            end_index = response.rfind('}') + 1
            dictionary_str = response[start_index: end_index]
            result_dict = eval(dictionary_str)
            all_result_dict.append(result_dict)
            if all([x in result_dict for x in DIMS]):
                logs.append('Succeed')
            else:
                logs.append(
                    f'Following Dims are not in results of turn {j}: '
                    f'{",".join([x for x in DIMS if x not in result_dict])}'
                )
        except Exception as e:
            logging.warning(str(e))
            all_result_dict.append({d: None for d in DIMS})
            logs.append(str(e))

    df = pd.DataFrame(all_result_dict)
    return dict(res=df, log='\n'.join(logs))
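Aggregation sketch (hedged; judge_model and line stand in for the caller's objects): mmdu_score returns one DataFrame row per dialogue turn, so per-dimension means can be taken over the returned frame:

    out = mmdu_score(judge_model, line)
    scores = out['res'].apply(pd.to_numeric, errors='coerce')
    print(scores.mean())  # per-dimension mean across turns; failed turns (None) are skipped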
VLMEvalKit-sudoku/vlmeval/dataset/utils/mmsci.py
ADDED
@@ -0,0 +1,434 @@
# flake8: noqa
import evaluate
from tqdm import tqdm
import numpy as np
import os.path as osp
import pandas as pd
from vlmeval import load, dump, track_progress_rich

g_eval_prompt = """
You will be given an oracle caption that describes an image. You will then be given a second caption written for the same image.

Your task is to rate the second caption on one metric.

Evaluation Criteria:

Relevance (1-5) - The extent to which the second caption is relevant to the key elements and context described in the oracle caption. A relevant caption should focus on the same subjects, objects, actions, or context highlighted in the oracle caption, without introducing unrelated or extraneous details.

Evaluation Steps:

1. Review the Oracle Caption: Carefully read the oracle caption to understand the main elements and context it describes.
2. Review the Second Caption: Assess whether the second caption focuses on the same key elements and context as the oracle caption. Evaluate if the second caption stays on topic and does not introduce irrelevant details.
3. Assign a Score for Relevance: Based on the Evaluation Criteria, rate how relevant the second caption is to the oracle caption's description of the same image.
"""

generate_atomic_facts_sys_prompt = """
You will be given a caption for a figure containing multiple panels, which includes descriptions for the entire figure as well as each individual panel. Your task is to break down the caption into separate, independent descriptions for the entire figure and each panel, formatted appropriately and separated by '-'
"""

generate_atomic_facts_usr_prompt_one_shot = """
The figure consists of four sub-figures labeled a, b, c, and d. All four images appear to be scanning electron microscope (SEM) images showing the microstructure of different materials, likely related to the iron-based cathode catalysts described in the article.\n\na. This image shows a highly porous structure with interconnected particles forming a network. The particles appear to be in the nanometer to micrometer size range. The scale bar indicates 1 \u03bcm.\n\nb. This image displays a closer view of what seems to be a similar material to (a), but at a higher magnification. The individual particles are more clearly visible, showing a rough, granular texture. The scale bar indicates 200 nm.\n\nc. This image reveals a different morphology compared to (a) and (b). It shows larger, more consolidated structures with a rougher surface texture. There are still visible pores and gaps between the structures. The scale bar indicates 1 \u03bcm.\n\nd. This final image appears to be a cross-sectional view of a porous material, possibly showing the internal structure of the catalyst. It reveals a highly interconnected network of pores and channels throughout the material. The scale bar indicates 200 nm.\n\nThese images likely represent different stages or variations of the iron-acetate/phenanthroline/zeolitic-imidazolate-framework-derived electrocatalyst mentioned in the article. The varying structures and porosities shown in these images could be related to the enhanced mass-transport properties and increased volumetric activity described in the text.
Model_Response:
- The figure consists of four sub-figures labeled a, b, c, and d.
- All four images appear to be scanning electron microscope (SEM) images.
- The images show the microstructure of different materials.
- The materials are likely related to the iron-based cathode catalysts described in the article.
- Image a shows a highly porous structure with interconnected particles forming a network.
- The particles in image a are in the nanometer to micrometer size range.
- The scale bar in image a indicates 1 μm.
- Image b displays a closer view of a material similar to that in image a but at higher magnification.
- The individual particles in image b are more clearly visible and show a rough, granular texture.
- The scale bar in image b indicates 200 nm.
- Image c shows larger, more consolidated structures with a rougher surface texture.
- There are visible pores and gaps between the structures in image c.
- The scale bar in image c indicates 1 μm.
- Image d appears to be a cross-sectional view of a porous material.
- Image d reveals the internal structure of the catalyst with a highly interconnected network of pores and channels.
- The scale bar in image d indicates 200 nm.
- These images likely represent different stages or variations of the iron-acetate/phenanthroline/zeolitic-imidazolate-framework-derived electrocatalyst mentioned in the article.
- The varying structures and porosities shown in these images could be related to the enhanced mass-transport properties described in the text.
- The varying structures and porosities in the images may contribute to increased volumetric activity described in the article.
"""

from pycocoevalcap.cider.cider import Cider
cider_scorer = Cider()


def compute_cider(predictions, references):
    """
    predictions: list of strings
    references: list of list of strings
    """
    # Cider.compute_score expects dicts mapping sample id -> list of captions
    # for both gts and res (the original passed a list of dicts for gts,
    # which always raised and silently zeroed the CIDEr score).
    return cider_scorer.compute_score({i: [ref for ref in refs] for i, refs in enumerate(references)},
                                      {i: [pred] for i, pred in enumerate(predictions)})[0]


def get_all_metrics_for_reference_based_metrics(
    references, candidates, image_id_list, reference_based_metrics_file,
):
    # Initialize the metrics
    existing_data = load(reference_based_metrics_file) if osp.exists(reference_based_metrics_file) else {}
    bleu_metric = evaluate.load("bleu")
    rouge_metric = evaluate.load("rouge")
    meteor_metric = evaluate.load("meteor")
    bertscore_metric = evaluate.load("bertscore")
    # cider_metric = evaluate.load("cider")

    # bleu_scores = [[], [], [], [], []] # B1, B2, B3, B4, BLEU
    # rouge_scores = [[], [], [], []] # ROUGE1, ROUGE2, ROUGEL, ROUGELSUM
    # meteor_scores = []
    # bertscore_scores = []

    # Calculate scores for each sample
    idx = 1
    print(f"Calculating metrics for {len(references)} samples")
    assert len(references) == len(candidates) == len(image_id_list)
    for ref, cand, image_id in tqdm(zip(references, candidates, image_id_list)):
        if not cand.strip():
            print(cand)
            continue

        default_bleu_score = {"bleu": 0.0, "precisions": [0.0, 0.0, 0.0, 0.0]}
        if image_id not in existing_data:
            existing_data[image_id] = {}
        bleu_score = existing_data.get(image_id, {}).get('bleu_score', default_bleu_score)

        if bleu_score == default_bleu_score:
            try:
                bleu_score = bleu_metric.compute(predictions=[cand], references=ref)
            except Exception:
                bleu_score = default_bleu_score
        existing_data[image_id]['bleu_score'] = bleu_score

        default_rouge_score = {
            "rouge1": 0.0,
            "rouge2": 0.0,
            "rougeL": 0.0,
            "rougeLsum": 0.0,
        }
        rouge_score = existing_data.get(image_id, {}).get('rouge_score', default_rouge_score)
        if rouge_score == default_rouge_score:
            try:
                rouge_score = rouge_metric.compute(predictions=[cand], references=ref)
            except Exception:
                rouge_score = default_rouge_score
        existing_data[image_id]['rouge_score'] = rouge_score

        # meteor score
        default_meteor_score = 0.0
        meteor_score = existing_data.get(image_id, {}).get('meteor_score', default_meteor_score)
        if meteor_score == default_meteor_score:
            try:
                meteor_score = meteor_metric.compute(predictions=[cand], references=ref)[
                    "meteor"
                ]
            except Exception:
                meteor_score = default_meteor_score
        existing_data[image_id]['meteor_score'] = meteor_score

        # bertscore
        default_bertscore_score = {"f1": [0.0]}
        bertscore_score = existing_data.get(image_id, {}).get('bertscore_score', default_bertscore_score)
        if bertscore_score == default_bertscore_score:
            try:
                bertscore_score = bertscore_metric.compute(
                    predictions=[cand], references=ref, lang="en"
                )
            except Exception:
                bertscore_score = default_bertscore_score
        existing_data[image_id]['bertscore_score'] = bertscore_score

        # cider score
        default_cider_score = 0.0
        cider_score = existing_data.get(image_id, {}).get('cider_score', default_cider_score)
        if cider_score == default_cider_score:
            try:
                # cider_score = cider_metric.compute(predictions=[cand], references=ref)["score"]
                cider_score = compute_cider([cand], [ref])
            except Exception:
                cider_score = default_cider_score
        existing_data[image_id]['cider_score'] = cider_score

        if idx % 50 == 0:
            print(f"Saving 50 samples to {reference_based_metrics_file}")
            dump(existing_data, reference_based_metrics_file)

        idx += 1

    dump(existing_data, reference_based_metrics_file)
    print(f"Saved all samples to {reference_based_metrics_file}")

    return existing_data


def llm_openai_judge(prediction, reference, dimension, prompt, model):

    if isinstance(reference, list):
        reference = reference[0]
    assert isinstance(prediction, str)

    if dimension == "fluency":
        sys_prompt = prompt.format(Second=prediction)  # no reference
        usr_prompt = f"Caption:\n{prediction}\n"
    else:
        sys_prompt = prompt.format(Target=reference, Second=prediction)
        usr_prompt = f"Oracle Caption:\n{reference}\n\nSecond Caption:\n{prediction}\n\n"
    usr_prompt += f"What is the {dimension} score (1-5)? Return the score ONLY!"

    model.system_prompt = sys_prompt

    answer = 0
    all_responses = model.generate(usr_prompt)

    for response in all_responses.choices:
        response = response.message.content.strip()
        try:
            answer = int(response)
            break
        except Exception:
            for s in ["1", "2", "3", "4", "5"]:
                if s in response:
                    answer = int(s)
                    break
    return answer


def g_eval_generate(evaluator, usr_prompt):
    return evaluator.generate(usr_prompt)


def get_all_metrics_for_g_eval_score(references, candidates, evaluator, tmp_file, nproc=4):
    # relevance scores
    assert len(references) == len(candidates)
    tups = []
    indices = range(len(references))
    for reference, prediction in tqdm(zip(references, candidates)):
        dimension = "relevance"
        if isinstance(reference, list):
            reference = reference[0]
        assert isinstance(prediction, str)

        sys_prompt = g_eval_prompt.format(Target=reference, Second=prediction)
        usr_prompt = f"Oracle Caption:\n{reference}\n\nSecond Caption:\n{prediction}\n\n"
        usr_prompt += f"What is the {dimension} score (1-5)? Return the score ONLY!"

        evaluator.system_prompt = sys_prompt
        tups.append((evaluator, usr_prompt))

    ans = {}
    if osp.exists(tmp_file):
        ans = load(tmp_file)
        ans = {k: v for k, v in ans.items() if evaluator.fail_msg not in str(v)}
    tups = [x for x, i in zip(tups, indices) if i not in ans]
    indices = [i for i in indices if i not in ans]

    if len(indices):
        _ = track_progress_rich(
            g_eval_generate,
            tups,
            nproc=nproc,
            chunksize=nproc,
            keys=indices,
            save=tmp_file,
        )
        ans = load(tmp_file)
    return ans


def merge_rating(refer_based_metrics_output_file_name, g_eval_metrics_output_file_name, fact_score_metrics_output_file):
    # Update metrics columns for each row
    refer_based_metrics_output_file = load(refer_based_metrics_output_file_name)
    g_eval_metrics_output_file = load(g_eval_metrics_output_file_name)
    # fact_score_metrics_output_file = load(fact_score_metrics_output_file)
    for idx, item in refer_based_metrics_output_file.iterrows():
        ref_based_metrics = eval(item['reference_based_metrics'])
        refer_based_metrics_output_file.at[idx, 'B2'] = ref_based_metrics['bleu_score']['precisions'][1] * 100
        refer_based_metrics_output_file.at[idx, 'RL'] = ref_based_metrics['rouge_score']['rougeL'] * 100
        refer_based_metrics_output_file.at[idx, 'M'] = ref_based_metrics['meteor_score'] * 100
        refer_based_metrics_output_file.at[idx, 'CD'] = ref_based_metrics['cider_score'] * 100
        refer_based_metrics_output_file.at[idx, 'BS'] = ref_based_metrics['bertscore_score']['f1'][0] * 100
        refer_based_metrics_output_file.at[idx, 'GE'] = g_eval_metrics_output_file.iloc[idx]['g_eval_metrics']
        # refer_based_metrics_output_file.at[idx, 'FS'] = eval(fact_score_metrics_output_file.iloc[idx]['fact_score_metrics'])['score']

    # df = refer_based_metrics_output_file
    # # Filter rows for the two settings
    # df_filtered = df[df['setting'].isin(['w/o.Abstract_w/o.Content', 'w.Abstract_w/o.Content'])]
    # # metrics = ['B2', 'RL', 'M', 'BS', 'CD', 'GE']
    # metrics = ['B2', 'RL', 'M', 'BS', 'GE']

    # # Group by subject and setting, averaging metrics
    # grouped = df_filtered.groupby(['subject', 'setting'])[metrics].mean()

    # # Pivot to get multi-level columns: top level is setting, sub-level is metric
    # result_df = grouped.unstack(level='setting')
    # # Swap levels so that top level is setting
    # result_df.columns = result_df.columns.swaplevel(0, 1)
    # # Ensure columns order matches required settings
    # result_df = result_df.reindex(columns=['w/o.Abstract_w/o.Content', 'w.Abstract_w/o.Content'], level=0)

    # # Reset index to have subject as a column
    # result_df = result_df.reset_index()
    # return result_df

    df = refer_based_metrics_output_file

    # metrics = ['B2', 'RL', 'M', 'BS', 'CD', 'FS', 'GE']
    metrics = ['B2', 'RL', 'M', 'BS', 'CD', 'GE']

    subject_df = df.groupby('subject')[metrics].mean().reset_index()

    category_df = df.groupby('category')[metrics].mean().reset_index()
    category_df.rename(columns={'category': 'subject'}, inplace=True)
    category_df['subject'] = 'CATEGORY_' + category_df['subject']

    overall_row = df[metrics].mean().to_frame().T
    overall_row.insert(0, 'subject', 'Overall')

    result_df = pd.concat([subject_df, category_df, overall_row], ignore_index=True)

    return result_df


def dump_multiindex_excel(df, path):
    from openpyxl import Workbook
    from openpyxl.styles import Alignment
    from openpyxl.utils.dataframe import dataframe_to_rows

    wb = Workbook()
    ws = wb.active
    ws.title = "Sheet1"

    ws.cell(row=1, column=1, value="subject")
    col_levels = df.columns.levels
    col_codes = df.columns.codes
    level0_labels = [col_levels[0][i] for i in col_codes[0]]
    level1_labels = [col_levels[1][i] for i in col_codes[1]]
    unique_level0 = list(dict.fromkeys(level0_labels))

    col_offset = 2
    start = col_offset
    for lvl0 in unique_level0:
        count = level0_labels.count(lvl0)
        end = start + count - 1
        ws.merge_cells(start_row=1, start_column=start, end_row=1, end_column=end)
        ws.cell(row=1, column=start, value=lvl0)
        start = end + 1

    ws.cell(row=2, column=1, value="subject")
    for idx, label in enumerate(level1_labels):
        ws.cell(row=2, column=col_offset + idx, value=label)

    for r_idx, row in enumerate(df.itertuples(index=False), start=3):
        ws.cell(row=r_idx, column=1, value=row[0])  # subject
        for c_idx, val in enumerate(row[1:], start=2):
            ws.cell(row=r_idx, column=c_idx, value=val)

    for row in ws.iter_rows():
        for cell in row:
            cell.alignment = Alignment(horizontal='center', vertical='center')

    wb.save(path)


def extract_int(s, scales=[0, 5]):
    # Return the first integer score in [scales[0], scales[1]] found in the string
    for score in range(scales[0], scales[1] + 1):
        if str(score) in s:
            return int(score)
    return None


def get_score(
    model,
    content,
    gamma=10,
    scales=[0, 5],
    atomic_facts=None,
):

    total_decisions = []
    scores = []
    init_scores = []

    decisions = []
    for atom in atomic_facts:
        if not atom:
            continue
        atom = atom.strip()

        definition = f"How relevant is the generated caption to the given human-written caption for the figure? Determine whether the information in the generated caption is included or mentioned in the human-written caption. Respond with a score between {scales[0]} and {scales[1]}."

        prompt = "Human-written caption: {}\n\nGenerated caption: {}\n\nHow relevant is the generated caption to the given human-written caption? Respond with ONLY a score between {} and {}.".format(
            content, atom, scales[0], scales[1]
        )

        model.system_prompt = definition
        outputs = model.generate(prompt)

        generated_answer = outputs.lower().strip()

        generated_score = extract_int(generated_answer, scales=scales)

        if generated_score is None:
            generated_score = 0.0
        else:
            try:
                # normalize the score to 0-1
                generated_score = float(generated_score) / scales[1]
            except Exception as e:
                print("Error:", e)
                generated_score = 0.0

        print("Atom score:", generated_score)

        decisions.append({"atom": atom, "is_supported": generated_score})

    score = np.mean([d["is_supported"] for d in decisions])
    if gamma:
        init_scores.append(score)
        penalty = (
            1.0 if len(atomic_facts) > gamma else np.exp(1 - gamma / len(atomic_facts))
        )
        score = penalty * score

    total_decisions.append(decisions)
    scores.append(score)
    print("Final score:", score)

    out = {
        "score": np.mean(scores),
        "decisions": total_decisions,
        "num_facts_per_response": np.mean(
            [len(d) for d in total_decisions if d is not None]
        ),
    }

    if gamma:
        out["init_score"] = np.mean(init_scores)

    return out


def fact_score_generate(model, line):
    generated_caption = line['prediction']
    model.system_prompt = generate_atomic_facts_sys_prompt
    mes = [
        dict(type='text', value=generate_atomic_facts_usr_prompt_one_shot),
        dict(type='text', value=generated_caption + '\nModel_Response:'),
    ]
    generated_facts = model.generate(mes).strip()
    formatted_facts = generated_facts.split("-")
    # topic = line["subject"]
    # abstract = line["abstract"]

    # ground-truth caption
    content = line["caption"]

    score_out = get_score(
        model,
        content,
        gamma=10,
        scales=[0, 5],
        atomic_facts=formatted_facts,
    )
    return score_out
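A toy invocation of the compute_cider helper above (the strings are illustrative only):

    preds = ["an SEM image of a porous material", "a bar chart of results"]
    refs = [["a scanning electron microscope image showing a porous network"],
            ["a bar chart summarizing the results"]]
    print(compute_cider(preds, refs))  # corpus-level CIDEr as a single float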
VLMEvalKit-sudoku/vlmeval/dataset/utils/mmsci4eval_req.txt
ADDED
@@ -0,0 +1,6 @@
evaluate
pycocoevalcap
bert_score
rouge_score
nltk
absl-py
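Install sketch (hedged; the file pins no versions, so latest releases are assumed):

    pip install -r vlmeval/dataset/utils/mmsci4eval_req.txt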
VLMEvalKit-sudoku/vlmeval/dataset/utils/olympiadbench.py
ADDED
@@ -0,0 +1,703 @@
import re
import json
from math import isclose
from decimal import Decimal, getcontext
from fractions import Fraction
import sys
import math
import timeout_decorator
import logging
from ...smp import *

try:
    import sympy as sp
    from sympy import simplify, Eq, sympify, evalf, Pow
    from sympy.parsing.latex import parse_latex
    import antlr4
except ImportError:
    logging.warning('sympy or antlr4 is not installed, please install it for OlympiadBench evaluation.')

FAIL_MSG = 'Failed to obtain answer via API.'


def get_gpt4_extract_ICE():
    example_1 = """
1.
Model response: 'Rounded to two decimal places, the perimeter of the sector is approximately:\n\n(-2, 1)'
Extracted Answer: (-2, 1)
"""  # noqa

    example_2 = """
2.
Model response: 'at those points.\n\nTherefore, the correct option that represents the meaning of the intersection points of the graphs is:\n\nD. They give the solutions to the equation $f(t)=g(t)$.",'
Extracted Answer: D
"""  # noqa

    example_3 = """
3.
Model response: ' at 1 (there's a closed circle at y = 1), the range in interval notation is \\((-4, 1]\\).\n\nFinal values:\nDomain: \\((-3, 3]\\)\nRange: \\((-4, 1]\\)'
Extracted Answer: Domain: \\((-3, 3]\\)\nRange: \\((-4, 1]\\)
"""  # noqa

    example_4 = """
4.
Model response: 'As it stands, I cannot provide the correct option letter because there isn't enough information to solve for 'y'.'
Extracted Answer: null
"""  # noqa

    example_5 = """
5.
Model response: 'Given that AB = 17.6 meters, we can now substitute into the equation:\n\nd = 17.6 / cos(38\u00b0)\n\nTherefore, to one decimal place, the distance d between Ned and Bart is approximately 22.3 meters.'
Extracted answer: 22.3
"""  # noqa

    example_6 = """
6.
Model response: have all the coefficients for the quadratic function:\n\\( f(x) = ax^2 + bx + c \\)\n\\( f(x) = -1x^2 - 2x + 1 \\)\n\nTherefore, the equation for the graphed function \\( f \\) is:\n\\( f(x) = -x^2 - 2x + 1 \\)"'
Extracted answer: f(x) = -x^2 - 2x + 1
"""  # noqa

    return [example_1, example_2, example_3, example_4, example_5, example_6]


def get_gpt4_score_ICE():
    example_1 = """
[Question]: Write the set of numbers represented on the number line in interval notation.
[Standard Answer]: (-2,1]
[Model_answer] : Extracted Answer: \\((-2, 1)\\)
Judgement: 0
"""  # noqa

    example_2 = """
[Question]: As shown in the figure, circle O has a radius 1.0, if angle BAC = 60.0, then the length of BC is ()\nChoices:\nA:2\nB:2\u221a{{3}}\nC:\u221a{{3}}\nD:2\u221a{{2}}
[Standard Answer]: C
[Model_answer] : B:2\u221a{{3}}
Judgement: 0
"""  # noqa

    example_3 = """
[Question]: Find the domain and range of the function f using interval notation.
[Standard Answer]: domain: [-4, 0) and range: (-3, 1]
[Model_answer] : Range: \\((-4, 1]\\)
Judgement: 0
"""  # noqa

    example_4 = """
[Question]: As shown in the figure, circle O has a radius 1.0, if angle BAC = 60.0, then the length of BC is ()\nChoices:\nA:2\nB:2\u221a{{3}}\nC:\u221a{{3}}\nD:2\u221a{{2}}
[Standard Answer]: C
[Model_answer] : null
Judgement: 0
"""  # noqa

    return [example_1, example_2, example_3, example_4]


def build_olympiad_gpt4_extract_prompt(line):
    task_description = """
I am providing you a response from a model to a math problem, termed 'Model Response'. You should extract the answer from the response as 'Extracted Answer'. Directly output the extracted answer with no explanation.\n\n
"""  # noqa
    prediction = str(line['prediction'])
    demo_prompt = task_description
    examples = get_gpt4_extract_ICE()
    for example in examples:
        demo_prompt += example + '\n\n'
    test_prompt = f"Model response: '{prediction}'\nExtracted Answer: "
    full_prompt = f'{demo_prompt}7.\n{test_prompt}'

    return full_prompt


def build_olympiad_gpt4_score_prompt(line):
    task_description = """
Below are two answers to a math or a physics question. Question is [Question], [Standard Answer] is the standard answer to the question, and [Model_answer] is the answer extracted from a model's output to this question. Determine whether these two answers are consistent.
Please note that only when the [Model_answer] completely matches the [Standard Answer] means they are consistent. For non-multiple-choice questions, if the meaning is expressed in the same way, it is also considered consistent, for example, 0.5m and 50cm.
If they are consistent, Judgement is 1; if they are different, Judgement is 0.\n\n
"""  # noqa
    question_for_eval = line['question']
    extract = line['extract']
    answer = line['final_answer']
    demo_prompt = task_description
    examples = get_gpt4_score_ICE()
    for example in examples:
        demo_prompt += example + '\n\n'
    test_prompt = f"""
[Question]: {question_for_eval}
[Standard Answer]: {answer}
[Model_answer] : {extract}
Judgement:"""
    full_prompt = f'{demo_prompt}{test_prompt}'

    return full_prompt


def post_check_score(line, prefetch=False):
    ans = str(line['final_answer']).strip()
    response = str(line['extract']).strip()

    if response == ans:
        return response if prefetch else True
    else:
        return False


def Olympiad_auxeval_extract(model, line):
    prompt = build_olympiad_gpt4_extract_prompt(line)
    log = ''
    retry = 5
    for i in range(retry):
        prediction = line['prediction']
        res = model.generate(prompt, temperature=i * 0.5)

        if FAIL_MSG in res:
            log += f'Try {i}: output is {prediction}, failed to parse.\n'
        else:
            log += 'Succeed'
            return dict(log_extract=log, extract=res)
    log += 'All 5 retries failed.\n'
    return dict(log_extract=log, extract='')


def Olympiad_auxeval_score(model, line):
    prompt = build_olympiad_gpt4_score_prompt(line)
    log = ''
    retry = 5
    if post_check_score(line, prefetch=True):
        res = post_check_score(line, prefetch=True)
        return dict(log_score='Prefetch succeed', score=True)
    for i in range(retry):
        prediction = line['prediction']
        res = model.generate(prompt, temperature=i * 0.5)

        if FAIL_MSG in res or res.strip() not in ['0', '1']:
            log += f'Try {i}: output is {prediction}, res is {res}, failed to parse.\n'
        else:
            log += 'Succeed'
            return dict(log_score=log, score=int(res) == 1)
    log += 'All 5 retries failed.\n'
    return dict(log_score=log, score=False)


chinese_answer_type_dict = {
    'Numerical': '数值',
    'Expression': '表达式',
    'Equation': '方程',
    'Interval': '区间'
}
english_answer_type_dict = {
    'Numerical': 'a numerical value',
    'Expression': 'an expression',
    'Equation': 'an equation',
    'Interval': 'an interval'
}


def get_single_answer_type_text(answer_type, is_chinese):
    if '-' in answer_type:  # No need now
        answer_type = answer_type[:answer_type.find('-')]
    for t in ['Numerical', 'Expression', 'Equation', 'Interval']:
        if t in answer_type:
            if is_chinese:
                return chinese_answer_type_dict[t]
            else:
                return english_answer_type_dict[t]
    exit(f'Error parsing answer type {answer_type}!')


def get_answer_type_text(answer_type, is_chinese, multiple_answer):
    # 'Tuple' has various meanings in different contexts, such as the position or the values
    # of a series of variables, so it may lead to confusion to directly use 'tuple' in the prompt.
    if ('Need_human_evaluate' in answer_type) or ('Tuple' in answer_type):
        full_answer_text = ''
    else:
        if not multiple_answer:
            answer_text = get_single_answer_type_text(answer_type, is_chinese)
            if is_chinese:
                full_answer_text = f',答案类型为{answer_text}'
            else:
                full_answer_text = f"The answer of the problem should be {answer_text}. "
        else:
            if ',' not in answer_type:  # Same answer type for all answers
                answer_text = get_single_answer_type_text(answer_type, is_chinese)
                if is_chinese:
                    full_answer_text = f',题目有多个答案,答案类型均为{answer_text}'
                else:
                    full_answer_text = f'The problem has multiple answers, each of them should be {answer_text}. '
            else:
                answer_types = answer_type.split(',')
                answer_types = [get_single_answer_type_text(t, is_chinese) for t in answer_types]
                if len(set(answer_types)) == 1:
                    answer_text = answer_types[0]
                    if is_chinese:
                        full_answer_text = f',题目有多个答案,答案类型均为{answer_text}'
                    else:
                        full_answer_text = f'The problem has multiple answers, each of them should be {answer_text}. '
                else:
                    if is_chinese:
                        answer_text = '、'.join(answer_types)
                        full_answer_text = f',题目有多个答案,答案类型分别为{answer_text}'
                    else:
                        answer_text = ', '.join(answer_types)
                        full_answer_text = (
                            f'The problem has multiple answers, with the answers in order being {answer_text}. '
                        )
    return full_answer_text


def make_input(prompt, question_content):
    # Input construction could be specialized per backend (e.g. vLLM); not implemented for now
    input = prompt + '\n' + question_content
    return input


sys.set_int_max_str_digits(1000000)
# Set the precision for decimal arithmetic
getcontext().prec = 50


class MathJudger:
    def __init__(self):
        self.special_signal_map = {
            "\\left": "",
            "\\right": "",
            "∶": ":",
            ",": ",",
            "$": "",
            "\\approx": "=",
            "\\simeq": "=",
            "\\sim": "=",
            "^\\prime": "'",
            "^{\\prime}": "'",
            "^\\circ": "",
            "%": "",
        }
        self.pi = parse_latex("\\pi")
        self.precision = 1e-8

    def split_by_comma(self, expr: str):
        in_bracket_num = 0
        splitted_expr = []
        start_idx = 0
        for i, char in enumerate(expr):
            if char == "(" or char == "[":
                in_bracket_num += 1
            elif char == ")" or char == "]":
                in_bracket_num -= 1
            elif char == "," and in_bracket_num == 0:
                splitted_expr.append(expr[start_idx:i].strip())
                start_idx = i + 1

        if start_idx < len(expr):
            splitted_expr.append(expr[start_idx:].strip())

        return splitted_expr
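    # Illustration (hedged, not part of the original file): split_by_comma only
    # splits on top-level commas, so bracketed answers stay intact, e.g.
    #   MathJudger().split_by_comma("(1, 2], x=3")  ->  ["(1, 2]", "x=3"]
    # because the comma inside "(1, 2]" is seen while in_bracket_num > 0.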
def trans_plus_minus_sign(self, expr_list: list):
|
| 295 |
+
new_expr_list = []
|
| 296 |
+
for expr in expr_list:
|
| 297 |
+
if "\\pm" in expr:
|
| 298 |
+
new_expr_list.append(expr.replace("\\pm", "+"))
|
| 299 |
+
new_expr_list.append(expr.replace("\\pm", "-"))
|
| 300 |
+
else:
|
| 301 |
+
new_expr_list.append(expr)
|
| 302 |
+
|
| 303 |
+
return new_expr_list
|
| 304 |
+
|
| 305 |
+
def judge(self, expression1, expression2, precision=1e-8):
|
| 306 |
+
# (默认 expression1 为 Ground_Truth)
|
| 307 |
+
precision = precision if isinstance(precision, list) else [precision]
|
| 308 |
+
|
| 309 |
+
try:
|
| 310 |
+
expression1, expression2 = self.preprocess(expression1, expression2)
|
| 311 |
+
except:
|
| 312 |
+
return False
|
| 313 |
+
if expression1 == expression2:
|
| 314 |
+
# print("原生相等")
|
| 315 |
+
return True
|
| 316 |
+
|
| 317 |
+
# 去除字符串中的中文字符,因为上面已经判断过了类似回答为"能"或"不能"的含有中文字符的回答情况
|
| 318 |
+
expression1 = re.sub(r'[\u4e00-\u9fff]+', '', expression1)
|
| 319 |
+
expression2 = re.sub(r'[\u4e00-\u9fff]+', '', expression2)
|
| 320 |
+
|
| 321 |
+
expression1 = self.split_by_comma(expression1)
|
| 322 |
+
expression2 = self.split_by_comma(expression2)
|
| 323 |
+
|
| 324 |
+
temp_list1 = self.trans_plus_minus_sign(expression1)
|
| 325 |
+
temp_list2 = self.trans_plus_minus_sign(expression2)
|
| 326 |
+
|
| 327 |
+
# 设计误差值列表
|
| 328 |
+
if len(precision) <= 1:
|
| 329 |
+
precision = precision * len(temp_list1)
|
| 330 |
+
|
| 331 |
+
if len(temp_list1) != len(temp_list2):
|
| 332 |
+
return False
|
| 333 |
+
|
| 334 |
+
# 判断两个列表中的元素是否可以两两配对,并且两两相等,由此支持多个回答的比较
|
| 335 |
+
idx = -1
|
| 336 |
+
while len(temp_list1) != 0:
|
| 337 |
+
idx = (idx + 1) % len(temp_list1)
|
| 338 |
+
|
| 339 |
+
item1 = temp_list1[idx]
|
| 340 |
+
self.precision = precision[idx]
|
| 341 |
+
# print(self.precision)
|
| 342 |
+
|
| 343 |
+
for item2 in temp_list2:
|
| 344 |
+
try:
|
| 345 |
+
if self.is_equal(item1, item2):
|
| 346 |
+
temp_list1.remove(item1)
|
| 347 |
+
temp_list2.remove(item2)
|
| 348 |
+
precision.remove(self.precision)
|
| 349 |
+
break
|
| 350 |
+
except Exception as err:
|
| 351 |
+
logging.warning(f'{type(err)}: {err}')
|
| 352 |
+
continue
|
| 353 |
+
else:
|
| 354 |
+
# If we didn't break from the inner loop, it means no match was found
|
| 355 |
+
return False
|
| 356 |
+
|
| 357 |
+
# If all elements are matched and removed, the lists can be paired
|
| 358 |
+
return True
|
| 359 |
+
|
| 360 |
+
    def is_interval(self, epr):
        return epr.startswith(("(", "[")) and epr.endswith((")", "]"))

    # Before numerical evaluation, the sympy pi symbol must be substituted
    # with its numerical approximation
    # def sympy_sub_pi(self, expression_sympy):
    #     return expression_sympy.subs(self.pi, math.pi)

    # The first expression is assumed to be the ground truth
    @timeout_decorator.timeout(30)
    def is_equal(self, expression1, expression2):
        if expression1 == expression2 and expression1 != "" and expression2 != "":
            # print("Literally equivalent")
            return True

        # First check whether both are intervals; if so compare them, and
        # return False when they differ
        if self.is_interval(expression1) and self.is_interval(expression2):
            try:
                if self.interval_equal(expression1, expression2):
                    # print("Interval equivalent")
                    return True
            except Exception:
                return False

        # Next, check numerical equality
        try:
            if self.numerical_equal(expression1, expression2):
                # print("Numerically equivalent")
                return True
        except Exception:
            pass

        # Next, check expression equality
        try:
            if self.expression_equal(expression1, expression2) and not ("=" in expression1 and "=" in expression2):
                # print("Expression equivalent")
                return True
        except Exception:
            pass

        # Finally, check equation equality
        try:
            if self.equation_equal(expression1, expression2):
                # print("Equation equivalent")
                return True
        except Exception:
            pass

        return False

    # Check whether two numerical values are equal within the allowed tolerance
    def numerical_equal(self, expression1: str, expression2: str, include_percentage: bool = True):
        """
        (expression1 is assumed to be the ground truth)
        Purpose: decide whether two numerical values are equal within the tolerance
        Step 1: account for possible percentage representations of the answer
        Step 2: compare the values within the absolute tolerance
        """
        reference = float(expression1)
        prediction = float(expression2)

        if include_percentage:
            gt_result = [reference / 100, reference, reference * 100]
        else:
            gt_result = [reference]

        for item in gt_result:
            # if isclose(item, prediction, abs_tol=self.precision, rel_tol=0):
            if abs(item - prediction) <= self.precision * 1.01:
                return True
        return False

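    # Example (illustrative): with the default tolerance, numerical_equal("0.5", "50")
    # returns True, because the percentage variants 0.005 / 0.5 / 50 are all tried.
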
    def expression_equal(self, exp1, exp2):
        """
        (expression1 is assumed to be the ground truth)
        Purpose: decide whether two expressions are mathematically equivalent
        Step 1: extract the expression proper, since some models answer "x=1" instead of "1"
        Step 2: use sympy to judge equivalence
        """

        # Keep only the right-hand side of an equality; the left-hand side is
        # usually the quantity being solved for
        def extract_expression(expression):
            if "=" in expression:
                expression = expression.split("=")[1]
            return expression.strip()

        exp1 = extract_expression(exp1)
        exp2 = extract_expression(exp2)

        exp_too_long = len(exp1) > 300 or len(exp2) > 300

        # Convert both expressions into a form that sympy can process
        expr1_sym = sympify(parse_latex(exp1))
        expr2_sym = sympify(parse_latex(exp2))

        if expr1_sym == expr2_sym:
            return True
        else:
            expr1_sym = self.sympy_sub_pi(expr1_sym)
            expr2_sym = self.sympy_sub_pi(expr2_sym)
            # If both expressions evaluate to concrete numbers, compare them numerically

            if (expr1_sym.has(sp.Symbol) and not expr2_sym.has(sp.Symbol)) or (
                    not expr1_sym.has(sp.Symbol) and expr2_sym.has(sp.Symbol)):
                return False
            elif not expr1_sym.has(sp.Symbol) and not expr2_sym.has(sp.Symbol):
                try:
                    if not (self.can_compute_power(expr1_sym) and self.can_compute_power(expr2_sym)):
                        print(
                            "These two numbers cannot be evaluated on the current machine: "
                            f"\"{str(expr1_sym)}\" and \"{str(expr2_sym)}\""
                        )
                        return False
                    if exp_too_long:
                        print(f'Expression {exp1} or {exp2} is too long to compute. ')
                        return False

                    if abs(expr1_sym.evalf() - expr2_sym.evalf()) <= self.precision * 1.01:
                        return True
                    else:
                        return False
                except Exception:
                    return False
            elif exp_too_long:
                print(f'Expression {exp1} or {exp2} is too long to compute. ')
                return False
            else:
                try:
                    simplified_expr = simplify(expr1_sym - expr2_sym)

                    num_value = simplified_expr.evalf()

                    return abs(num_value) < 1e-3
                except Exception:
                    return False

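    # Example (illustrative): "\\frac{1}{2}" and "0.5" differ as strings, but
    # sympify(parse_latex(...)) reduces both to the same number, so this returns
    # True; "x = 1" is first trimmed to "1" by extract_expression.
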
    def equation_equal(self, expression1, expression2):
        """
        (expression1 is assumed to be the ground truth)
        Purpose: decide whether two equations are mathematically equivalent
        Step 1: reduce each equation to the standard form "LHS = 0", so only the
                left-hand sides need to be compared for equivalence
        Step 2: use sympy to compute the quotient of the two left-hand sides; if
                the quotient or its reciprocal is an integer, the two equations
                are mathematically equivalent
        """

        # Move the right-hand side of the equation to the left and return a
        # sympy expression
        def simplify_equation(latex_eq):
            # Split the equation into left-hand and right-hand sides
            lhs, rhs = latex_eq.split('=')

            # Parse the LaTeX expressions with parse_latex
            lhs_expr = parse_latex(lhs)
            rhs_expr = parse_latex(rhs)

            # Build the equation object
            equation = Eq(lhs_expr, rhs_expr)

            # Simplify the equation: move the right-hand side to the left
            simplified_eq = simplify(equation.lhs - equation.rhs)

            return simplified_eq

        expr1_sym = simplify_equation(expression1)
        expr2_sym = simplify_equation(expression2)

        division_result_1 = simplify(expr1_sym / expr2_sym)
        division_result_2 = simplify(expr2_sym / expr1_sym)

        # If the quotient of the two reduced forms is a nonzero integer, the two
        # equations are equivalent by the derivation above
        if (division_result_1.is_Integer and division_result_1 != 0) or (
                division_result_2.is_Integer and division_result_2 != 0):
            return True
        else:
            return False

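    # Example (illustrative): "2x + 2y = 2" reduces to 2x + 2y - 2 and
    # "x + y = 1" to x + y - 1; their quotient simplifies to the integer 2,
    # so the two equations are judged equivalent.
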
    def interval_equal(self, expression1, expression2):
        # Purpose: decide whether two intervals are mathematically equivalent
        # Step 1: simplify the interval expressions by removing decorative tokens
        #         such as "\left" and "\right", and drop any leading "x \in"
        # Step 2: compare the bracket types on both ends and the mathematical
        #         expressions inside

        def compare_two_interval(inter1, inter2):

            # First compare the brackets on both ends; only continue if they match
            if inter1[0] != inter2[0] or inter1[-1] != inter2[-1]:
                return False

            inter1 = inter1.strip('[]()')
            inter2 = inter2.strip('[]()')

            # Split each interval into its endpoints
            items_1 = inter1.split(',')
            items_2 = inter2.split(',')

            for item_1, item_2 in zip(items_1, items_2):
                if not self.expression_equal(item_1, item_2):
                    return False
            return True

        interval1 = expression1
        interval2 = expression2

        if interval1 == interval2:
            return True
        else:
            inter_list1 = interval1.split("\\cup")
            inter_list2 = interval2.split("\\cup")

            if len(inter_list1) != len(inter_list2):
                return False
            else:
                for inter1, inter2 in zip(inter_list1, inter_list2):
                    if not compare_two_interval(inter1, inter2):
                        return False
                return True

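    # Example (illustrative): "(0, \\frac{1}{2}]" matches "(0, 0.5]" because the
    # brackets agree and each endpoint passes expression_equal; unions joined by
    # "\\cup" are compared piece by piece, in order.
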
    def preprocess(self, expression1, expression2):

        # Try to capture the content of \boxed{...}; if there are several, join
        # them with commas; if the braces are unbalanced, raise an error
        def extract_boxed_content(latex_str):
            # Find all \boxed{...} structures
            boxed_matches = re.finditer(r'\\boxed{', latex_str)
            results = ""

            for match in boxed_matches:
                start_index = match.end()
                end_index = start_index
                stack = 1

                # Search forward from just after \boxed{ until the matching
                # closing brace is found
                while stack > 0 and end_index < len(latex_str):
                    if latex_str[end_index] == '{':
                        stack += 1
                    elif latex_str[end_index] == '}':
                        stack -= 1
                    end_index += 1

                if stack == 0:
                    # Extract the content inside \boxed{}
                    content = latex_str[start_index:end_index - 1]
                    results += content + ","
                else:
                    # The braces are not properly balanced
                    raise ValueError("Mismatched braces in LaTeX string.")

            # If no \boxed{} was matched, fall back to extracting every formula
            # from the last non-empty line of the text
            if results == "":
                last_line_ans = latex_str.strip().split("\n")[-1]
                dollar_pattern = r"\$(.*?)\$"
                answers = re.findall(dollar_pattern, last_line_ans)

                if answers:
                    for ans in answers:
                        results += ans + ","
                else:
                    results = latex_str

            return results

        def special_symbol_replace(expression):
            if "\\in " in expression:
                expression = expression.split("\\in ")[1]

            # Replace special characters that do not affect LaTeX parsing;
            # these are purely decorative / cosmetic tokens
            for signal in self.special_signal_map:
                expression = expression.replace(signal, self.special_signal_map[signal])

            expression = expression.strip("\n$,.:;^_=+`!@#$%^&*~,。")

            pattern = r'\\(?:mathrm|mathbf)\{~?([^}]*)\}'
            expression = re.sub(pattern, r'\1', expression)

            return expression

        exp1, exp2 = extract_boxed_content(expression1), extract_boxed_content(expression2)
        exp1, exp2 = special_symbol_replace(exp1), special_symbol_replace(exp2)

        return exp1, exp2

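    # Example (illustrative): extract_boxed_content("... \\boxed{\\frac{1}{2}}")
    # walks the brace stack and returns "\\frac{1}{2},"; the trailing comma is
    # later stripped by special_symbol_replace.
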
    def can_compute_power(self, expr):
        """
        Check if the power expression can be computed.

        Parameters:
            expr (sympy expression): The expression to check.

        Returns:
            bool: True if the expression can be computed, False otherwise.
        """
        # Check if the expression is a power expression
        if isinstance(expr, Pow):
            # Extract the base and the exponent
            base, exp = expr.as_base_exp()

            # Check if the base and the exponent are numbers
            if base.is_number and exp.is_number:
                # Set a threshold for the maximum size of the exponent
                MAX_EXP = 1000  # This threshold can be adjusted based on the computing environment

                # Check if the exponent is greater than the threshold
                if abs(exp.evalf()) > MAX_EXP:
                    return False
                else:
                    return True
            else:
                # If the base or the exponent is not a number, we cannot compute the power
                return False
        else:
            # If the expression is not a power expression, return True as it is not the case we are checking for
            return True


def extract_answer(is_chinese, model_output, is_deepseek=False):
    # deepseekmath has a special answering format
    if str(model_output) == 'nan':
        model_output = 'nan'

    if is_deepseek:
        if is_chinese:
            matches = re.findall('## 解题答案(.*)', model_output)
        else:
            matches = re.findall('The answer is: (.*)', model_output)

        # Check whether at least one match was found; if not, pass the whole
        # output downstream to search for \boxed{}
        if matches:
            # If several matches were found, take the last one
            model_answer = matches[-1].strip()
            return model_answer
        else:
            return model_output

    if is_chinese:
        matches = re.findall('所以最终答案是(.*)', model_output)
    else:
        matches = re.findall('So the final answer is (.*)', model_output)

    # Check whether at least one match was found; if not, pass the whole
    # output downstream to search for \boxed{}
    if matches:
        # If several matches were found, take the last one
        model_answer = matches[-1].strip()
        return model_answer
    else:
        return model_output


def calculate_merged_accuracy(reference_dir, text_only):
    pass
VLMEvalKit-sudoku/vlmeval/dataset/utils/omni_verifier.py
ADDED
@@ -0,0 +1,220 @@
import re
import numpy as np
from .judge_util import build_judge
from .yorn import YOrN_Extraction

EVAL_TMPL = """
You are an AI assistant tasked with evaluating whether a model's response correctly answers
a given visual-language question.
You will be provided with: 1. The model's response; 2. The ground truth answer.
Your task is to determine whether the model's response conveys the same meaning as the ground truth.
The response is considered **correct** if:
- It has the same meaning as the ground truth, even if phrased differently.
- It provides additional relevant details without altering the original meaning.
The response is considered **wrong** if:
- It contradicts the ground truth.
- It misses essential information or includes additional incorrect information.
Your evaluation should be a single word (either `"yes"` (if correct) or `"no"` (if incorrect)).

Now please complete the following task:
[Begin Response]{response}[End Response]
[Begin Ground-Truth]{ground_truth}[End Ground-Truth]
"""

def _process_digit_article(inText):
    outText = []
    tempText = inText.lower().split()
    articles = ['a', 'an', 'the']
    manualMap = {
        'none': '0', 'zero': '0',
        'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5',
        'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10',
        'eleven': '11', 'twelve': '12', 'thirteen': '13', 'fourteen': '14',
        'fifteen': '15', 'sixteen': '16', 'seventeen': '17', 'eighteen': '18',
        'nineteen': '19', 'twenty': '20',
    }
    contractions = {
        'aint': "ain't",
        'arent': "aren't",
        'cant': "can't",
        'couldve': "could've",
        'couldnt': "couldn't",
        "couldn'tve": "couldn't've",
        "couldnt've": "couldn't've",
        'didnt': "didn't",
        'doesnt': "doesn't",
        'dont': "don't",
        'hadnt': "hadn't",
        "hadnt've": "hadn't've",
        "hadn'tve": "hadn't've",
        'hasnt': "hasn't",
        'havent': "haven't",
        'hed': "he'd",
        "hed've": "he'd've",
        "he'dve": "he'd've",
        'hes': "he's",
        'howd': "how'd",
        'howll': "how'll",
        'hows': "how's",
        "Id've": "I'd've",
        "I'dve": "I'd've",
        'Im': "I'm",
        'Ive': "I've",
        'isnt': "isn't",
        'itd': "it'd",
        "itd've": "it'd've",
        "it'dve": "it'd've",
        'itll': "it'll",
        "let's": "let's",
        'maam': "ma'am",
        'mightnt': "mightn't",
        "mightnt've": "mightn't've",
        "mightn'tve": "mightn't've",
        'mightve': "might've",
        'mustnt': "mustn't",
        'mustve': "must've",
        'neednt': "needn't",
        'notve': "not've",
        'oclock': "o'clock",
        'oughtnt': "oughtn't",
        "ow's'at": "'ow's'at",
        "'ows'at": "'ow's'at",
        "'ow'sat": "'ow's'at",
        'shant': "shan't",
        "shed've": "she'd've",
        "she'dve": "she'd've",
        "she's": "she's",
        'shouldve': "should've",
        'shouldnt': "shouldn't",
        "shouldnt've": "shouldn't've",
        "shouldn'tve": "shouldn't've",
        'somebodyd': "somebody'd",
        "somebodyd've": "somebody'd've",
        "somebody'dve": "somebody'd've",
        'somebodyll': "somebody'll",
        'somebodys': "somebody's",
        'someoned': "someone'd",
        "someoned've": "someone'd've",
        "someone'dve": "someone'd've",
        'someonell': "someone'll",
        'someones': "someone's",
        'somethingd': "something'd",
        "somethingd've": "something'd've",
        "something'dve": "something'd've",
        'somethingll': "something'll",
        'thats': "that's",
        'thered': "there'd",
        "thered've": "there'd've",
        "there'dve": "there'd've",
        'therere': "there're",
        'theres': "there's",
        'theyd': "they'd",
        "theyd've": "they'd've",
        "they'dve": "they'd've",
        'theyll': "they'll",
        'theyre': "they're",
        'theyve': "they've",
        'twas': "'twas",
        'wasnt': "wasn't",
        "wed've": "we'd've",
        "we'dve": "we'd've",
        'weve': "we've",
        'werent': "weren't",
        'whatll': "what'll",
        'whatre': "what're",
        'whats': "what's",
        'whatve': "what've",
        'whens': "when's",
        'whered': "where'd",
        'wheres': "where's",
        'whereve': "where've",
        'whod': "who'd",
        "whod've": "who'd've",
        "who'dve": "who'd've",
        'wholl': "who'll",
        'whos': "who's",
        'whove': "who've",
        'whyll': "why'll",
        'whyre': "why're",
        'whys': "why's",
        'wont': "won't",
        'wouldve': "would've",
        'wouldnt': "wouldn't",
        "wouldnt've": "wouldn't've",
        "wouldn'tve": "wouldn't've",
        'yall': "y'all",
        "yall'll": "y'all'll",
        "y'allll": "y'all'll",
        "yall'd've": "y'all'd've",
        "y'alld've": "y'all'd've",
        "y'all'dve": "y'all'd've",
        'youd': "you'd",
        "youd've": "you'd've",
        "you'dve": "you'd've",
        'youll': "you'll",
        'youre': "you're",
        'youve': "you've",
    }
    for word in tempText:
        word = manualMap.setdefault(word, word)
        if word not in articles:
            outText.append(word)
    for wordId, word in enumerate(outText):
        if word in contractions:
            outText[wordId] = contractions[word]
    outText = ' '.join(outText)
    return outText

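# Example (illustrative): _process_digit_article("The two dogs") -> "2 dogs";
# articles are dropped and number words are mapped to digits before comparison.
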
class OmniVerifier:

    tmpl_map = {
        'boxed': r'\\boxed\{([^}]*)\}',  # escaped backslash so the pattern matches a literal \boxed{...}
        'brace': r'\{([^}]*)\}'
    }

    def __init__(self,
                 tmpl=None,
                 judge='gpt-4o',
                 lower_case=True,
                 rule_only=False,
                 retry=3,
                 timeout=60,
                 **kwargs):

        self.judge = build_judge(model=judge, retry=retry, timeout=timeout)
        self.tmpl = tmpl if tmpl not in self.tmpl_map else self.tmpl_map[tmpl]
        self.lower_case = lower_case
        self.rule_only = rule_only
        if retry >= 3:
            self.t_series = list(np.arange(0, 1, 1 / (retry - 1))) + [1.0, ]
        else:
            self.t_series = [0, 1] if retry == 2 else [0, ]
        self.match = -1

    def verify(self, prediction, gt, **kwargs):
        prediction = str(prediction)
        gt = str(gt)
        if self.tmpl is not None:
            matches = re.findall(self.tmpl, prediction)
            if len(matches):
                if isinstance(self.match, int) and self.match < len(matches):
                    prediction = matches[self.match]
        if self.lower_case:
            prediction = prediction.lower()
            gt = gt.lower()
        prediction = _process_digit_article(prediction)
        gt = _process_digit_article(gt)
        if gt == prediction:
            return True, 'Rule Match'
        else:
            if self.rule_only:
                return False, f'Rule Match: Prediction: {prediction}, GT: {gt}'
            judge_prompt = EVAL_TMPL.format(response=prediction, ground_truth=gt)
            for t in self.t_series:
                res = self.judge.generate(judge_prompt, temperature=t, **kwargs)
                answer = YOrN_Extraction(res)
                if answer in ['Yes', 'No']:
                    return answer == 'Yes', 'Judge Match'
            return False, f'Judge Failed: Prediction: {prediction}, GT: {gt}'
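
# Example (illustrative, assuming a configured judge backend):
#   verifier = OmniVerifier(tmpl='boxed', rule_only=True)
#   verifier.verify(r'The answer is \boxed{two}.', '2')
#   -> (True, 'Rule Match'): the boxed span is extracted, then "two" is
#      normalized to "2" before the string comparison.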
VLMEvalKit-sudoku/vlmeval/dataset/utils/qbench_video.py
ADDED
@@ -0,0 +1,49 @@
from ...smp import *
from .multiple_choice import extract_answer_from_item
from PIL import Image, ImageOps
import numpy as np

FAIL_MSG = 'Failed to obtain answer via API.'

VQA_JUDGE_SYS_PROMPT = """
You are a helpful assistant that grades answers related to visual video quality.
There are a lot of special terms or keywords related to video processing and photography.
You will pay attention to the context of 'quality evaluation' when grading.
"""

VQA_JUDGE_USER_PROMPT = """
Given the question {}, evaluate whether the response {} completely matches the correct answer {}.
First, check the response and please rate score 0 if the response is not a valid answer.
Please rate score 2 if the response completely or almost completely matches the correct answer on completeness, accuracy, and relevance.
Please rate score 1 if the response partly matches the correct answer on completeness, accuracy, and relevance.
Please rate score 0 if the response doesn't match the correct answer on completeness, accuracy, and relevance at all.
Please only provide the result in the following format: Score:
"""  # noqa: E501


def check_ans_mcq(pred, ans, correct_choice, correct_answer):
    flag = False

    if correct_choice == pred or correct_choice + "." in pred or correct_answer == pred:
        flag = True
    elif correct_choice in pred.split("\n"):
        flag = True

    return flag


def check_ans_vqa(model, line):
    score = model.generate(VQA_JUDGE_USER_PROMPT.format(line['question'], line['prediction'], line['answer'])).strip()
    return score


def get_dimension_rating(score_file):
    score = load(score_file)
    result_dict = {}
    for idx, item in score.iterrows():
        question_type = eval(item['dimensions'])[0].split(',')[0]
        if question_type not in result_dict:
            result_dict[question_type] = [0, 0]
        result_dict[question_type][0] += int(item['score'])
        result_dict[question_type][1] += 1
    return result_dict
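
# Example (illustrative): get_dimension_rating accumulates, per question type,
# a [total_score, count] pair, e.g. {'distortion': [37, 25], 'aesthetics': [12, 9]}
# (hypothetical keys); the mean rating for a dimension is total_score / count.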
VLMEvalKit-sudoku/vlmeval/dataset/utils/qspatial.py
ADDED
@@ -0,0 +1,123 @@
from ...smp import *
from ...utils import can_infer


FAIL_MSG = 'Failed to obtain answer via API.'


def get_gpt4_ICE_for_qspatial():
    example_1 = """
Hint: Please answer the question requiring in a tuple format. The tuple should contain a numeric value and a unit,
e.g., (1, m), (2.2, cm), (3.12, meter), at the end.\n
Model response: **Object Identification**

* The object in question is a chair.
* The chair is not visible in the image.

**Conclusion**

The height of the chair cannot be determined from the provided image.\n
Extracted answer: (0, cm)
"""

    example_2 = """
Hint: Please answer the question requiring in a tuple format. The tuple should contain a numeric value and a unit,
e.g., (1, inch), (1.2, cm), (3.0, feet), at the end.\n
Model response: **Step 1: Identify the stapler and the recycle bin in the image.**

The stapler is located on the wooden table, and the recycle bin is located on the floor.

**Step 2: Determine the distance between the stapler and the recycle bin.**

The stapler is 0.5 meters from the edge of the table, and the recycle bin is 1.5 meters from the edge of the table.
Therefore, the minimum distance between the stapler and the recycle bin is 1.5 - 0.5 = 1 meter.

**Answer:** 1 m\n
Extracted answer: (1, m)
"""
    example_3 = """
Hint: Please answer the question requiring in a tuple format. The tuple should contain a numeric value and a unit,
e.g., (1, foot), (2, cm), (4.3, meter), at the end.\n
Model response: The mirror in the image is approximately 5 feet 4 inches tall.\n
Extracted answer: (64, inch)
"""
    example_4 = """
Hint: Please answer the question requiring in a tuple format. The tuple should contain a numeric value and a unit,
e.g., (0.1, cm), (2.9, cm), (0.3, meter), at the end.\n
Model response: The minimum distance between the wooden chair and the chair near the camera in the image is 1.7 feet.\n
Extracted answer: (1.7, feet)
"""
    example_5 = """
Hint: Please answer the question requiring in a tuple format. The tuple should contain a numeric value and a unit,
e.g., (5.1, cm), (0.9, cm), (55, mm), at the end.\n
Model response: The height of the painting's bottom edge from the floor is approximately 4.5 feet.\n
Extracted answer: (4.5, feet)
"""
    return [example_1, example_2, example_3, example_4, example_5]

def list_to_dict(lst):
    return {chr(65 + i): val for i, val in enumerate(lst)}

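# Example (illustrative): list_to_dict(['cat', 'dog']) -> {'A': 'cat', 'B': 'dog'},
# i.e. choices are keyed by consecutive capital letters starting from 'A'.
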
def post_check(line, prefetch=False):
    res = None
    ans = line['answer']
    response = line['prediction'] if prefetch else line['res']
    try:
        if line['question_type'] == 'multi_choice':
            ans = line['answer_option']
            choices = list_to_dict(eval(line['choices']))
            res = can_infer(response, choices)
            if prefetch:
                return res
        else:
            if line['answer_type'] == 'integer':
                res = int(response)
                ans = int(line['answer'])
            elif line['answer_type'] == 'float':
                res = float(response)
                ans = float(line['answer'])
            else:
                res = str(response)
                ans = str(ans)
    except ValueError:
        pass

    if res == ans:
        return res if prefetch else True
    else:
        return False

def build_qspatial_gpt4_prompt(line):
    task_description = """
Please read the following example.
Then extract the answer from the model response and type it at the end of the prompt.\n
"""
    prediction = str(line['prediction'])
    prompt = task_description
    examples = get_gpt4_ICE_for_qspatial()
    for example in examples:
        prompt += example + '\n'
    prompt += 'Model response: ' + prediction
    prompt += '\nExtracted answer:'
    return prompt

def QSpatial_auxeval(model, line):
    prompt = build_qspatial_gpt4_prompt(line)

    log = ''
    retry = 5
    for i in range(retry):
        prediction = line['prediction']
        res = model.generate(prompt, temperature=i * 0.5)

        if FAIL_MSG in res:
            log += f'Try {i}: output is {prediction}, failed to parse.\n'
        else:
            log += 'Succeed'
            return dict(log=log, res=res)
    log += 'All 5 retries failed.\n'
    return dict(log=log, res='')
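
# Example (illustrative): each retry raises the sampling temperature by 0.5
# (0.0, 0.5, 1.0, ...), trading determinism for diversity when the extractor
# keeps returning the API failure message.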
VLMEvalKit-sudoku/vlmeval/dataset/utils/seephys.py
ADDED
@@ -0,0 +1,312 @@
from ...smp import *
from collections import OrderedDict, defaultdict
import re
from sympy.parsing.latex import parse_latex
from sympy import latex, Eq, simplify

FAIL_MSG = 'Failed to obtain answer via API.'

prompt_scoring = r"""
|
| 9 |
+
You are a physics professor, please determine if the Standard answer and Model Answer are equivalent. Note that the significant figures in the answer must meet the requirements. Your judgment should be 0 (non-equivalent) or 1 (equivalent).
|
| 10 |
+
|
| 11 |
+
[Question]: A force of 20 N acts on an object of mass 5 kg. What is the acceleration of the object?
|
| 12 |
+
[Standard Answer]: 4 m/s²
|
| 13 |
+
[Model Answer] : 4
|
| 14 |
+
Judgement: 1
|
| 15 |
+
|
| 16 |
+
[Question]: A projectile is launched at an angle $\\theta$ with initial velocity $v_0$. What is its time of flight before returning to the same height, assuming negligible air resistance and gravitational acceleration $g$?
|
| 17 |
+
[Standard Answer]: $$ t = \\frac{{2 v_0 \\sin(\\theta)}}{{g}} $$
|
| 18 |
+
[Model Answer] : Extracted Answer: $$ t = \\frac{{2 v_0 \\cos(\\frac{\\pi}{2} - \\theta)}}{{g}} $$
|
| 19 |
+
Judgement: 1
|
| 20 |
+
|
| 21 |
+
[Question]: The position of a particle is given by $x(t) = 3t^2 - 2t + 5$ meters. What is its instantaneous velocity at $t=2$ seconds?
|
| 22 |
+
[Standard Answer]: 10 m/s
|
| 23 |
+
[Model Answer] : Velocity $v(t) = dx/dt = 6t - 2$. At $t=2s$, $v(2) = 6(2) - 2 = 12 - 2 = 10$. So the velocity is 10 m/s.
|
| 24 |
+
Judgement: 1
|
| 25 |
+
|
| 26 |
+
[Question]: A car travels North at 20 m/s. It then turns and travels East at 20 m/s. What is the magnitude of its change in velocity?
|
| 27 |
+
[Standard Answer]: Approximately 28.3 m/s
|
| 28 |
+
[Model Answer] : The change in velocity is 0 m/s because the speed is the same.
|
| 29 |
+
Judgement: 0
|
| 30 |
+
|
| 31 |
+
[Question]: An object is thrown horizontally from a height of 20m with an initial speed of 10 m/s. Calculate: (a) the time it takes to hit the ground ($t_g$), and (b) the horizontal distance ($d_x$) it travels before hitting the ground. (Use g = 10 m/s²)
|
| 32 |
+
[Standard Answer]: (a) $t_g = 2$ s, (b) $d_x = 20$ m
|
| 33 |
+
[Model Answer] : (a) The time to hit the ground $t_g$ is 2 s. (b) The horizontal distance $d_x$ is 10 m.
|
| 34 |
+
Judgement: 0
|
| 35 |
+
|
| 36 |
+
[Question]: An engine performs $1.2 \\times 10^5$ J of work in 2 minutes. What is its average power output in watts?
|
| 37 |
+
[Standard Answer]: 1 kW
|
| 38 |
+
[Model Answer] : Power = Work / Time = $1.2 \\times 10^5$ J / (2 min * 60 s/min) = $1.2 \\times 10^5$ J / 120 s = 1000 W.
|
| 39 |
+
Judgement: 1
|
| 40 |
+
|
| 41 |
+
[Question]: A resistor has a voltage of 10V across it and a current of 2A flowing through it. What is its resistance and power dissipation?
|
| 42 |
+
[Standard Answer]: Resistance R = 5 Ohms , Power P = 20 Watts.
|
| 43 |
+
[Model Answer] : The resistance is $R = V/I = 10V / 2A = 5 \Omega$. The power dissipated is $P = VI = 10V \\times 2A = 20W$.
|
| 44 |
+
Judgement: 1
|
| 45 |
+
|
| 46 |
+
[Question]: The displacement of an object in Simple Harmonic Motion (SHM) is given by $x(t) = A \sin(\omega t)$. Determine the equation for its acceleration, $a(t)$.
|
| 47 |
+
[Standard Answer]: $$ a(t) = -A\omega^2 \sin(\omega t) $$
|
| 48 |
+
[Model Answer] : The acceleration is the second derivative of displacement. $v(t) = A\omega \cos(\omega t)$. $a(t) = A\omega^2 \cos\left(\omega t + \\frac{\pi}{2}\\right)$.
|
| 49 |
+
Judgement: 1
|
| 50 |
+
|
| 51 |
+
[Question]: 给出相对论性粒子总能量 $E$ 的速度展开式(到 $v^4/c^4$ 项)。
|
| 52 |
+
[Standard Answer]: $E = mc^2 \left(1 + \frac{v^2}{2c^2} + \frac{3v^4}{8c^4} + \mathcal{O}(v^6/c^6)\right)$
|
| 53 |
+
[Model Answer]: $E = \gamma m c^2 = \frac{mc^2}{\sqrt{1 - v^2/c^2}} \approx mc^2 + \frac{1}{2}mv^2 + \frac{3}{8} \frac{mv^4}{c^2}$
|
| 54 |
+
Judgement: 1
|
| 55 |
+
|
| 56 |
+
[Question]: 计算粒子能量 $E$ 穿过势垒 $V_0$ ($E < V_0$) 的透射系数 $T$。
|
| 57 |
+
[Standard Answer]: $\ln T \approx \ln 16 + \ln\left(\frac{E}{V_0}\right) + \ln\left(1 - \frac{E}{V_0}\right) - \frac{2d}{\hbar} \sqrt{2m(V_0 - E)}$
|
| 58 |
+
[Model Answer]: $T \approx 16 \frac{E}{V_0} \left(1 - \frac{E}{V_0}\right) e^{-2d\sqrt{2m(V_0 - E)}/\hbar}$
|
| 59 |
+
Judgement: 1
|
| 60 |
+
|
| 61 |
+
[Question]: The position of a particle is given by $x(t) = (2t^3 - 3t)$ meters. What is its acceleration at $t=1$ second? The final answer should retain 3 significant figures.
|
| 62 |
+
[Standard Answer]: 12.0 m/s²
|
| 63 |
+
[Model Answer] : $v(t) = 6t^2 - 3$. $a(t) = 12.1t$. At $t=1s$, $a(1) = 12.1 \\text{ m/s}^2$.
|
| 64 |
+
Judgement: 0
|
| 65 |
+
---
|
| 66 |
+
Now please provide your judgement (0 or 1), DONNOT output explanation:
|
| 67 |
+
""" # noqa
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def get_example():
    example_1 = """
Question: What is the net force acting on a 5 kg object accelerating at 3 m/s² to the right?\n
Model response: Using F = ma, the net force is 15 N to the right.\n
Extracted answer: the net force is 15 N to the right.
"""  # noqa

    example_2 = """
Question: Calculate the charge of an electron. (Unit: C)\n
Model response: The elementary charge of an electron is approximately -1.602 × 10⁻¹⁹ coulombs.\n
Extracted answer: -1.602 × 10⁻¹⁹ C
"""  # noqa

    example_3 = """
Question: How much money does Luca need to buy a sour apple candy and a butter-scotch candy? (Unit: $)\n
Model response: Luca needs $1.45 to buy a sour apple candy and a butterscotch candy.\n
Extracted answer: $1.45
"""  # noqa

    example_4 = """
Question: Between which frequencies does human hearing typically range? \n
Model response: Human hearing ranges between 20 Hz and 20,000 Hz.\n
Extracted answer: [20 Hz, 20000 Hz]
"""  # noqa

    example_5 = """
Question: List the wavelengths of visible light colors.\n
Model response: Visible light ranges from:\n
- Red: ~700 nm\n
- Green: ~550 nm\n
- Blue: ~450 nanometre\n
Extracted answer: Red: 700 nm; Green: 550 nm; Blue: 450 nanometre.
"""  # noqa
    return [example_1, example_2, example_3, example_4, example_5]

def build_extract_prompt(line):
    task_description = """
Please read the following example.
Then extract the answer from the model response and type it at the end of the prompt.\n
"""  # noqa
    question = "Please answer this question in the image." if str(line['question']) == 'nan' else line['question']  # noqa

    prediction = extract_by_rule(line)
    prompt = task_description
    examples = get_example()
    for example in examples:
        prompt += example + '\n'
    prompt += question + '\n'
    prompt += 'Model response: ' + prediction
    prompt += 'Extracted answer:'
    return prompt

def list_to_dict(lst):
    return {chr(65 + i): val for i, val in enumerate(lst)}

def extract_by_rule(line):
    response = line['prediction']
    try:
        pattern = r'<answer>\s*(\d+)\s*</answer>'
        match = re.search(pattern, response)
        if match:
            response = match.group(1)
            return response
    except Exception:
        pass
    try:
        pattern = r"the final answer is: (.+?)\."
        match = re.search(pattern, response)
        if match:
            response = match.group(1)
            return response
    except Exception:
        pass
    try:
        pattern = r"The answer is: (.+?)\."
        match = re.search(pattern, response)
        if match:
            response = match.group(1)
            return response
    except Exception:
        pass

    try:
        response = int(response)
        return str(response)
    except Exception:
        pass
    try:
        response = float(response)
        return str(response)
    except Exception:
        pass
    return str(response)

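# Example (illustrative): the rules are tried in order, so extract_by_rule on
# "<answer> 42 </answer>" yields "42", while a bare numeric string such as
# "3.5" falls through to the float cast and is returned as "3.5".
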
def quick_compare(response, answer, tol=1e-6):
    if response is None or answer is None:
        return False

    # Case 1: compare numerical values (e.g. "3.1415" vs "\pi")
    if response.is_Number and answer.is_Number:
        return abs(float(response - answer)) < tol
    # Case 2: compare equations (e.g. "x = y" vs "y = x")
    if isinstance(response, Eq) and isinstance(answer, Eq):
        return simplify(response.lhs - response.rhs) == simplify(answer.lhs - answer.rhs)

    # Case 3: compare expressions (e.g. "\frac{x}{y}" vs "x/y")
    return simplify(response - answer) == 0

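# Example (illustrative): quick_compare(parse_latex(r"\frac{x}{y}"),
# parse_latex(r"x/y")) hits case 3 and returns True, since the difference
# simplifies to 0.
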
def post_check(line, prefetch=False):
    # prefetch: return extracted response
    ans = line['answer']
    try:
        res = extract_by_rule(line)
    except ValueError:
        return False

    if str(res) == str(ans):
        return str(res) if prefetch else True

    try:
        parsed_res = parse_latex(res)
        parsed_ans = parse_latex(ans)
        if quick_compare(parsed_res, parsed_ans):
            return latex(parsed_res) if prefetch else True
    except Exception:
        return False
    return False

def extract(model, line):
    log = ''
    retry = 5
    if post_check(line, prefetch=False):
        res = post_check(line, prefetch=True)
        return dict(log='Prefetch succeed', extract=res, score=1)
    else:
        prompt = build_extract_prompt(line)
        for i in range(retry):
            prediction = line['prediction']
            res = model.generate(prompt, temperature=i * 0.5)  # extract
            if not res or FAIL_MSG in res:
                log += f'Try {i}: output is {prediction}, failed to parse.\n'
            else:
                log += 'Succeed'
                score = score_func(model, res, line['question'], line['answer'])
                if score is None:
                    log += '\nScore failed'
                    return dict(log=log, extract=res, score=-1)
                return dict(log=log, extract=res, score=score)
        log += 'All 5 retries failed.\n'
        return dict(log=log, extract='', score=-1)

def score_func(model, response, query, gt):
    if not response:
        return 0
    try:
        full_prompt = prompt_scoring.strip() + f"\n[Question]: {query}\n[Standard Answer]: {gt}\n[Model Answer]: {response}\nJudgement: "  # noqa
        # A bounded loop: the original while-loop never advanced its counter
        for try_n in range(5):
            score = model.generate(full_prompt, temperature=try_n * 0.3)
            if 'Judgement: ' in score:
                score = score.split('Judgement: ')[-1]
            elif 'Judgement:' in score:
                score = score.split('Judgement:')[-1]
            elif 'judgement: ' in score:
                score = score.split('judgement: ')[-1]
            elif 'judgement:' in score:
                score = score.split('judgement:')[-1]
            try:
                if int(score) == 0 or int(score) == 1:
                    return int(score)
            except Exception:
                continue
    except Exception as e:
        print("score_func Error!")
        print(e)
    return None

def eval_acc(result_file):
    data = load(result_file)
    keys = ['level', 'subject', 'language', 'source', 'vision_relevance', 'img_category', 'sig_figs']
    keys = [k for k in keys if k in data]
    tot = {k: defaultdict(lambda: 0) for k in keys}
    fetch = {k: defaultdict(lambda: 0) for k in keys}
    hit = {k: defaultdict(lambda: 0) for k in keys}
    tot['Overall'] = 0
    fetch['Overall'] = 0
    hit['Overall'] = 0

    lt = len(data)
    for i in range(lt):
        item = data.iloc[i]
        tot['Overall'] += 1
        for k in keys:
            value = str(item[k])
            tot[k][value] += 1

        if 'Prefetch succeed' in item['log']:
            fetch['Overall'] += 1
            for k in keys:
                value = str(item[k])
                fetch[k][value] += 1

        if post_check(item, prefetch=False):
            hit['Overall'] += 1
            for k in keys:
                value = str(item[k])
                hit[k][value] += 1
        elif item['score'] == 1:
            hit['Overall'] += 1
            for k in keys:
                value = str(item[k])
                hit[k][value] += 1

    res = {k: defaultdict(lambda: 0) for k in keys}
    res['acc'] = 0
    res['prefetch_rate'] = 0

    res['acc'] = hit['Overall'] / tot['Overall'] * 100 if tot['Overall'] > 0 else 0
    res['prefetch_rate'] = fetch['Overall'] / tot['Overall'] * 100 if tot['Overall'] > 0 else 0

    def calculate_accuracy(hit_dict, tot_dict, res_dict, category):
        for key in tot_dict[category]:
            total = tot_dict[category][key]
            hits = hit_dict[category][key]
            res_dict[category][key] = hits / total * 100 if total > 0 else 0

    for category in keys:
        calculate_accuracy(hit, tot, res, category)
    res_dict = {
        'Overall': {
            'Accuracy (%)': res['acc'], 'PrefetchRate (%)': res['prefetch_rate']
        }, **{cat: dict(res[cat]) for cat in keys}
    }
    return res_dict
VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/__init__.py
ADDED
File without changes
VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/aquarium/filter_prompt.json
ADDED
@@ -0,0 +1,118 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"role": "system",
|
| 4 |
+
"content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only 0, e, or s; *, or 0, or _, or -, or e might indicates empty that you should convert to 0. If the perception / answer is not a valid solution or not existe, please put a Wrong there. Sometimes perception is not given but answer is there, you should put a Wrong for perception and process the answer. Your goal is only to format the user's input, not to check if it is correct or not."},
|
| 5 |
+
{
|
| 6 |
+
"role": "user",
|
| 7 |
+
"content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n],\n\"think\": \"Based on the numbers given on the sides, each row and column must have 2 filled cells. To ensure contiguity at the bottom, the filled cells should be placed in the bottom two rows. The first and third columns have a constraint of 4 filled cells, so those columns must have the bottom two cells filled. The second and fourth columns can have any two cells filled in the bottom two rows.\",\n\"answer\": [\n [\"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\"],\n [\"s\", \"s\", \"s\", \"s\"],\n [\"s\", \"e\", \"s\", \"e\"]\n]\n}\n```"
|
| 8 |
+
},
|
| 9 |
+
{
|
| 10 |
+
"role": "assistant",
|
| 11 |
+
"content": "Initial State\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[0,0,0,0],[0,0,0,0],[s,s,s,s],[s,0,s,0]]"
|
| 12 |
+
},
|
| 13 |
+
{
|
| 14 |
+
"role": "user",
|
| 15 |
+
"content": "Yes. "
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"role": "user",
|
| 19 |
+
"content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"]\n ],\n \"think\": \"The puzzle is a 4x5 grid. Each column and row has a number indicating how many cells should be filled in that row/column. The filled cells must be contiguous. \\nLet's start with the first column which has 4 filled cells. Since the filled cells must be contiguous, we can fill the bottom four cells of the first column. Similarly, the last column also has 4 filled cells, so we fill the bottom four cells. The second and third columns have 2 filled cells each. We can fill the bottom two cells in each of these columns. This satisfies the column constraints. Now let's check the row constraints. The bottom two rows have 4 filled cells each, which is satisfied by our current solution. The top two rows have 2 filled cells each, which is also satisfied. Therefore, this is the solution.\",\n \"answer\": [\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"s\", \"s\", \"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\", \"s\", \"s\"]\n ]\n}\n```"
|
| 20 |
+
},
|
| 21 |
+
{
|
| 22 |
+
"role": "assistant",
|
| 23 |
+
"content": "Initial State\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]\nSolution\n[[0,0,0,0,0],[0,0,0,0,0],[s,s,s,s,s],[s,s,s,s,s],[s,s,s,s,s]]"
|
| 24 |
+
|
| 25 |
+
},
|
| 26 |
+
{
|
| 27 |
+
"role": "user",
|
| 28 |
+
"content": "Yes. "
|
| 29 |
+
},
|
| 30 |
+
{
|
| 31 |
+
"role": "user",
|
| 32 |
+
"content":"```json\n{\n\"perception\": [[\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"]],\n\"answer\": [[\"s\", \"s\", \"e\", \"e\"], [\"s\", \"s\", \"e\", \"e\"], [\"e\", \"e\", \"s\", \"s\"], [\"e\", \"e\", \"s\", \"s\"]]\n}\n```"
|
| 33 |
+
},
|
| 34 |
+
{
|
| 35 |
+
"role": "assistant",
|
| 36 |
+
"content": "Initial State\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[s,s,0,0],[s,s,0,0],[0,0,s,s],[0,0,s,s]]"
|
| 37 |
+
},
|
| 38 |
+
{
|
| 39 |
+
"role": "user",
|
| 40 |
+
"content": "Yes. "
|
| 41 |
+
},
|
| 42 |
+
{
|
| 43 |
+
"role": "user",
|
| 44 |
+
"content": "**Current Game State:**\n```json\n{\n\"perception\": [\n[\"s\", \"e\", \"e\", \"e\"],\n[\"e\", \"e\", \"e\", \"s\"],\n[\"e\", \"s\", \"e\", \"s\"],\n[\"e\", \"e\", \"e\", \"s\"],\n[\"s\", \"e\", \"s\", \"e\"],\n[\"e\", \"s\", \"e\", \"e\"],\n[\"e\", \"e\", \"e\", \"s\"]\n]\n}\n```\n\n**Solution:**\n```json\n{\n\"answer\": [\n[\"s\", \"s\", \"e\", \"s\"],\n[\"e\", \"s\", \"s\", \"s\"],\n[\"e\", \"s\", \"e\", \"s\"],\n[\"e\", \"e\", \"s\", \"s\"],\n[\"s\", \"s\", \"e\", \"e\"],\n[\"e\", \"s\", \"e\", \"s\"],\n[\"s\", \"e\", \"e\", \"s\"]\n]\n}\n```\n\n**Final Answer:**\nThe solution is represented by the \"answer\" array, where \"s\" denotes filled cells and \"e\" denotes empty cells."
|
| 45 |
+
},
|
| 46 |
+
{
|
| 47 |
+
"role": "assistant",
|
| 48 |
+
"content": "Initial State\n[[s,0,0,0],[0,0,0,s],[0,s,0,s],[0,0,0,s],[s,0,s,0],[0,s,0,0],[0,0,0,s]]\nSolution\n[[s,s,0,s],[0,s,s,s],[0,s,0,s],[0,0,s,s],[s,s,0,0],[0,s,0,s],[s,0,0,s]]"
|
| 49 |
+
|
| 50 |
+
},
|
| 51 |
+
{
|
| 52 |
+
"role": "user",
|
| 53 |
+
"content": "Yes. "
|
| 54 |
+
},
|
| 55 |
+
{
|
| 56 |
+
"role": "user",
|
| 57 |
+
"content": "```json\n{\n\"perception\": [\n[0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0]\n],\n\"think\": \"The grid is initially empty. We need to fill the aquariums to satisfy the given row and column constraints. The numbers on the sides indicate the total number of filled cells in each row and column. We can start by placing the first filled cell in each row and column as shown in the solution. Then, we can fill the remaining cells while ensuring that each aquarium's water cells are contiguous at the bottom of that region.\",\n\"answer\": [\n[0, 0, 0, 0, 0],\n[0, 1, 1, 0, 0],\n[0, 1, 1, 0, 0],\n[0, 1, 1, 0, 0],\n[0, 1, 1, 0, 0]\n]\n}"
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"role": "assistant",
|
| 61 |
+
"content": "Initial State\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]\nSolution\nWrong"
|
| 62 |
+
|
| 63 |
+
},
|
| 64 |
+
{
|
| 65 |
+
"role": "user",
|
| 66 |
+
"content": "Yes. Because 1 is undefined, so put a Wrong there"
|
| 67 |
+
},
|
| 68 |
+
{
|
| 69 |
+
"role": "user",
|
| 70 |
+
"content": "```json\n{\n\"perception\": [\n[0, 0, 0, 0],\n[0, 0, 0, 0],\n[0, 2, 2, 0],\n[0, 2, 2, 0]\n],\n\"answer\": [\n[0, 0, s, s],\n[0, 0, s, s],\n[0, s, s, 0],\n[0, s, s, 0]\n]\n}\n```"
|
| 71 |
+
},
|
| 72 |
+
{
|
| 73 |
+
"role": "assistant",
|
| 74 |
+
"content": "Initial State\nWrong\nSolution\n[[0,0,s,s],[0,0,s,s],[0,s,s,0],[0,s,s,0]]"
|
| 75 |
+
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"role": "user",
|
| 79 |
+
"content": "Yes. As 2 is not defined"
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"role": "user",
|
| 83 |
+
"content": "```json\n{\n\"answer\": [\n [\"e\", \"e\", \"s\", \"s\"],\n [\"e\", \"s\", \"s\", \"e\"],\n [\"s\", \"s\", \"e\", \"e\"],\n [\"s\", \"e\", \"e\", \"e\"]\n]\n}\n```\n\nThis solution satisfies the given constraints:\n\n1. Each aquarium's water cells are contiguous at the bottom of that region.\n2. The row counts match the number of filled cells in each row: [0, 2, 2, 2].\n3. The column counts match the number of filled cells in each column: [2, 2, 2, 2].\n\nThe aquariums are filled as follows:\n- Aquarium 1: [[2, 0], [1, 0], [0, 0]]\n- Aquarium 2: [[2, 1], [2, 2]]\n- Aquarium 3: [[1, 3], [0, 3], [0, 2], [1, 2], [1, 1], [0, 1]]\n- Aquarium 4: [[2, 3], [3, 3], [3, 2]]"
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"role": "assistant",
|
| 87 |
+
"content": "Initial State\nWrong\nSolution\n[[0,0,s,s],[0,s,s,0],[s,s,0,0],[s,0,0,0]]"
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"role": "user",
|
| 91 |
+
"content": "Yes. there is no perception, but the answer is valid, so put it there."
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"role": "user",
|
| 95 |
+
"content": "```json\n{\n \"think\": \"To solve this puzzle, I will first identify the filled cells and their positions in the given board state. Then, I will use these positions to fill the aquariums with water according to the given row and column counts. Finally, I will check if the solution is correct by ensuring that the water cells are contiguous at the bottom of each region.\",\n \"answer\": [\n [\"s\", \"s\", \"e\", \"e\"],\n [\"s\", \"s\", \"e\", \"e\"],\n [\"s\", \"s\", \"e\", \"e\"],\n [\"e\", \"s\", \"e\", \"s\"]\n ]\n}\n```"
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"role": "assistant",
|
| 99 |
+
"content": "Initial State\nWrong\nSolution\n[[s,s,0,0],[s,s,0,0],[s,s,0,0],[0,s,0,s]]"
|
| 100 |
+
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"role": "user",
|
| 104 |
+
"content": "Yes. there is no perception, but the answer is valid, so put it there."
|
| 105 |
+
},
|
| 106 |
+
{
|
| 107 |
+
"role": "user",
|
| 108 |
+
"content": "```json\n{\n\"answer\": [\n[1, 1, 1, 1],\n[1, 0, 0, 1],\n[1, 0, 0, 1],\n[1, 0, 0, 1]\n]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\nWrong\nSolution\nWrong"
},
{
"role": "user",
"content": "Yes. there is no perception, so Wrong; for answer, 1 is not defined."
}
]
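The feedback turns above pin down this formatter's contract: a missing perception or answer section maps to Wrong, placeholder marks map to 0, and any symbol outside the 0/s alphabet (the digits 1 and 2 in the examples) invalidates the whole grid. Below is a minimal sketch of that rule under those assumptions; `format_grid` is a hypothetical name, not code from VLMEvalKit.

```python
# Hypothetical sketch of the "Wrong" rule the few-shot turns illustrate.
def format_grid(grid, allowed=("s",), empty_marks=("*", "_", "-", "e", "#", "0")):
    """Render a grid as [[...],[...]] text, or "Wrong" when the section
    is missing or contains a symbol outside the allowed alphabet."""
    if not isinstance(grid, list) or not grid:
        return "Wrong"                    # e.g. no "perception" key at all
    rows = []
    for row in grid:
        cells = []
        for cell in row:
            cell = str(cell)
            if cell in empty_marks:
                cells.append("0")         # placeholder marks become 0
            elif cell in allowed:
                cells.append(cell)
            else:
                return "Wrong"            # undefined symbol, e.g. 1 or 2
        rows.append("[" + ",".join(cells) + "]")
    return "[" + ",".join(rows) + "]"

# format_grid([[0, 1], [0, 0]]) -> "Wrong"
# format_grid([["e", "s"], ["s", "e"]]) -> "[[0,s],[s,0]]"
```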
VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/battleships/filter_prompt.json
ADDED
@@ -0,0 +1,91 @@
[
{
"role": "system",
"content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only three types values 0 or s; if you see * or _ or - or e or #, you should convert them into 0. But if there is any number in the grid, it is Wrong. If the perception / answer is not a valid solution or not existe, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not"
},
{
"role": "user",
"content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"s\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n],\n\"think\": \"Based on the given information, there is a ship of size 2 and two ships of size 1. The row hints indicate that the ship of size 2 must be placed in the second row, and the column hints suggest that one of the size 1 ships must be placed in the second column. The other size 1 ship can be placed in either the first, third, or fourth column without violating the rules.\",\n\"answer\": [\n [\"e\", \"s\", \"e\", \"e\"],\n [\"e\", \"s\", \"e\", \"e\"],\n [\"e\", \"e\", \"s\", \"e\"],\n [\"e\", \"e\", \"e\", \"s\"]\n]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[0,0,0,0],[0,s,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[0,s,0,0],[0,s,0,0],[0,0,s,0],[0,0,0,s]]"
},
{
"role": "user",
"content": "Yes. "
},
{
"role": "user",
"content": "model_output\": \"```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"s\", \"s\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"]\n ],\n \"answer\": [\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"s\", \"s\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"s\"],\n [\"e\", \"s\", \"e\", \"e\", \"e\"]\n ]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,s,s,0],[0,0,0,0,0],[0,0,0,0,0]]\nSolution\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,s,s,0],[0,0,0,0,s],[0,s,0,0,0]]"
},
{
"role": "user",
"content": "Yes. "
},
{
"role": "user",
"content": "**Step 1: Analyze the given information**\n\n* The grid is 6x6.\n* There are three ships of sizes 2, 1, and 1.\n* The hints indicate that there are two cells in the first row and one cell in the second row.\n* There is one cell in the first column and one cell in the second column.\n\n**Step 2: Place the ships**\n\n* We can start by placing the two-cell ship in the first row. We can place it in the first two columns or the last two columns.\n* Let's place it in the first two columns. This leaves us with a 4x4 grid to place the remaining ships.\n* We can place the one-cell ship in the second row and the first column.\n* This leaves us with a 4x4 grid to place the remaining one-cell ship.\n* We can place it in the second row and the second column.\n\n**Step 3: Check for any constraints**\n\n* There are no constraints mentioned in the problem, so we can proceed with our current placement.\n\n**Step 4: Write the solution**\n\n```json\n{\"perception\": [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], \"think\": \"Place the two-cell ship in the first two columns, then place the one-cell ship in the second row and the first column, and finally place the remaining one-cell ship in the second row and the second column.\", \"answer\": [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]}\n```\n\n**Answer:** {\"perception\": [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], \"think\": \"Place the two-cell ship in the first two columns, then place the one-cell ship in the second row and the first column, and finally place the remaining one-cell ship in the second row and the second column.\", \"answer\": [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]}"
},
{
"role": "assistant",
"content": "Initial State\n[[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]\nSolution\n[[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]"
},
{
"role": "user",
"content": "Yes. "
},
{
"role": "user",
"content": "```json\n{\n\"perception\": [\n[\"#\", \"s\", \"#\", \"#\"],\n[\"#\", \"s\", \"#\", \"#\"],\n[\"#\", \"#\", \"#\", \"#\"],\n[\"#\", \"#\", \"#\", \"#\"]\n],\n\"think\": \"The grid is 4x4, and there are two ships: one of size 1 and one of size 2. The size-1 ship is already placed in the bottom-left corner (x1=0, y1=3). The size-2 ship must be placed horizontally or vertically, not overlapping with the size-1 ship.\",\n\"answer\": [\n[\"#\", \"s\", \"#\", \"#\"],\n[\"#\", \"s\", \"#\", \"#\"],\n[\"#\", \"#\", \"#\", \"#\"],\n[\"#\", \"#\", \"#\", \"#\"]\n]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[0,s,0,0],[0,s,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[0,s,0,0],[0,s,0,0],[0,0,0,0],[0,0,0,0]]"
},
{
"role": "user",
"content": "Yes. "
},
{
"role": "user",
"content": "```json\n{\n\"perception\": [\n[\"*\", \"*\", \"*\", \"e\"],\n[\"*\", \"*\", \"s\", \"*\"],\n[\"*\", \"*\", \"s\", \"*\"],\n[\"*\", \"e\", \"*\", \"*\"]\n],\n\"think\": \"Let's analyze this 4x4 grid:\n1. We need to place 1 size-2 ship and 2 size-1 ships\n2. The row hints (0,2,1,1) and column hints (2,0,2,0) tell us how many cells are ships\n3. We can see:\n - A size-2 ship vertically placed in column 2, rows 1-2\n - Two water cells marked with dots\n4. Looking at row 0 with hint 0, all cells must be water\n5. Column 1 has hint 0, so all cells must be water\n6. The remaining two size-1 ships must be placed in valid positions without touching the existing size-2 ship\n7. Given the constraints, one size-1 ship must be in row 3, column 0 and the other in row 3, column 2\",\n\"answer\": [\n[\"e\", \"e\", \"e\", \"e\"],\n[\"e\", \"e\", \"s\", \"e\"],\n[\"e\", \"e\", \"s\", \"e\"],\n[\"s\", \"e\", \"s\", \"e\"]\n]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[0,0,0,0],[0,0,s,0],[0,0,s,0],[0,0,0,0]]\nSolution\n[[0,0,0,0],[0,0,s,0],[0,0,s,0],[s,0,s,0]]"
},
{
"role": "user",
"content": "Yes. "
},
{
"role": "user",
"content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\"]\n ],\n \"think\": \"Analyze the given grid. We are given hints indicating how many ship cells are in each row and column. Small black circles represent single-cell ship cells, and two-cell ships must be placed as per the hints. From the row and column numbers, I will determine where the ship segments must go, ensuring no ships touch each other, even diagonally.\",\n \"answer\": [\n [],\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[0,0,0],[0,0,0],[0,0,0]]\nSolution\nWrong"
},
{
"role": "user",
"content": "Yes. as there is no answer could be extracted from the model output"
},
{
"role": "user",
"content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"s\"]\n ],\n \"answer\": [\n [\"e\", \"s\", \"e\", \"e\"],\n [\"e\", \"e\", \"s\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\"],\n [\"s\", \"e\", \"e\", \"s\"]\n ]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,s]]\nSolution\n[[0,s,0,0],[0,0,s,0],[0,0,0,0],[s,0,0,s]]"
},
{
"role": "user",
"content": "Yes. "
}
]
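Several of the battleships model outputs above arrive wrapped in a fenced json code block, and one contains broken JSON (the answer left as an empty array with a dangling brace). Below is a hedged sketch of that extraction step, assuming the payload is a single flat JSON object inside the first fence, as in every example here; a parse failure maps to the Wrong branch, matching the assistant turns. The helper name is hypothetical.

```python
import json
import re

# Hypothetical extraction helper; the payloads above never nest objects,
# so a non-greedy match up to the first "}" suffices for this sketch.
def extract_payload(text: str):
    match = re.search(r"```json\s*(\{.*?\})\s*```", text, re.DOTALL)
    if match is None:
        return None              # no fenced block -> caller emits "Wrong"
    try:
        return json.loads(match.group(1))
    except json.JSONDecodeError:
        return None              # malformed JSON -> caller emits "Wrong"
```

On the truncated example above, `json.loads` raises, the sketch returns None, and the caller renders exactly what the assistant shows: `Solution\nWrong`.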
VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/binairo/filter_prompt.json
ADDED
@@ -0,0 +1,67 @@
[
{
"role": "system",
"content": "You are a text formatter. Your goal is to extract two parts, perception and answer, from the model's response and put them into a clean format, following the conversation history and the user feedback. The desired format is a 2D array containing only three kinds of values: 0, w, or b; if you see *, _, -, e, or #, convert it to 0. If you see numbers such as 1, 2, or 3, it is Wrong. If the perception / answer is not a valid solution or does not exist, put a Wrong there. Your goal is only to format the user's input, not to check whether it is correct."
},
{
"role": "user",
"content": "```json\n{\n\"perception\": [\n[\"*\", \"*\", \"w\", \"w\"],\n[\"b\", \"b\", \"*\", \"w\"],\n[\"w\", \"w\", \"*\", \"b\"],\n[\"w\", \"*\", \"*\", \"*\"]\n],\n\"think\": [\n\"1. Looking at row 1 (0-based), there are two black circles (b,b), so the remaining two must be white to avoid three consecutive blacks\",\n\"2. Looking at column 3 (0-based), there are already three whites (w,w,w), so the last cell must be black\",\n\"3. In row 2, we have (w,w,*,b) - to avoid three whites in a row, the empty cell must be black\",\n\"4. In row 3, we have (w,*,*,*) - given the column constraints and avoiding three consecutive same colors, it must be (w,b,w,b)\",\n\"5. This completes the grid while satisfying all constraints\"\n],\n\"answer\": [\n[\"b\", \"b\", \"w\", \"w\"],\n[\"b\", \"b\", \"w\", \"w\"],\n[\"w\", \"w\", \"b\", \"b\"],\n[\"w\", \"b\", \"w\", \"b\"]\n]\n}\n```"
},
{
"role": "assistant",
"content": "Perception\n[[0,0,w,w],[b,b,0,w],[w,w,0,b],[w,0,0,0]]\nAnswer\n[[b,b,w,w],[b,b,w,w],[w,w,b,b],[w,b,w,b]]"
},
{
"role": "user",
"content": "Yes. "
},
{
"role": "user",
"content": "```json\n{\n \"perception\": [\n [\"b\", \"b\", \"*\", \"*\"],\n [\"*\", \"b\", \"*\", \"w\"],\n [\"*\", \"*\", \"b\", \"b\"],\n [\"w\", \"*\", \"w\", \"b\"]\n ],\n \"think\": \"To solve this, we apply the Binairo rules. Here\u2019s the step-by-step reasoning: \n1. Row 0: The first two cells are 'b'. According to the rules, no more than two same-colored pieces can be adjacent. Thus, the third cell must be 'w'.\n2. Row 3: The fourth cell is 'b', and the second cell is blank. Following the row's structure and aiming for balanced numbers of 'b' and 'w', the second cell must be 'b.\"\n3. Each horizontal"
},
{
"role": "assistant",
"content": "Perception\n[[b,b,0,0],[0,b,0,w],[0,0,b,b],[w,0,w,b]]\nAnswer\nWrong"
},
{
"role": "user",
"content": "Yes. as there is no answer section, so you should put a Wrong there."
},
{
"role": "user",
"content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"w\"],\n [\"b\", \"b\", \"*\", \"*\"],\n [\"w\", \"*\", \"w\", \"b\"],\n [\"w\", \"*\", \"*\", \"*\"]\n ],\n \"answer\": [\n [\"b\", \"w\", \"b\", \"w\"],\n [\"b\", \"b\", \"w\", \"b\"],\n [\"w\", \"b\", \"w\", \"b\"],\n [\"w\", \"b\", \"w\", \"b\"]\n ]\n}\n```"
},
{
"role": "assistant",
"content": "Perception\n[[0,0,0,w],[b,b,0,0],[w,0,w,b],[w,0,0,0]]\nAnswer\n[[b,w,b,w],[b,b,w,b],[w,b,w,b],[w,b,w,b]]"
},
{
"role": "user",
"content": "Yes. "
},
{
"role": "user",
"content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"w\"],\n [\"b\", \"b\", \"*\", \"*\"],\n [\"w\", \"*\", \"w\", \"b\"],\n [\"w\", \"*\", \"*\", \"*\"]\n ],\n \"answer\": [\n [\"b\", \"w\", \"b\", \"w\"],\n [\"b\", \"b\", \"w\", \"b\"],\n [\"w\", \"b\", \"w\", \"b\"],\n [\"w\", \"b\", \"w\", \"b\"]\n ]\n}\n```"
},
{
"role": "assistant",
"content": "Perception\n[[0,0,0,w],[b,b,0,0],[w,0,w,b],[w,0,0,0]]\nAnswer\n[[b,w,b,w],[b,b,w,b],[w,b,w,b],[w,b,w,b]]"
},
{
"role": "user",
"content": "Yes. "
},
{
"role": "user",
"content": "The user wants me to solve a Binairo puzzle.\nI need to identify the black and white circles in the image and represent them as 'b' and 'w' respectively. Empty cells will be represented by '*'.\nThen, I need to apply the Binairo rules to fill the empty cells.\nThe rules are:\n1. No more than two adjacent cells can have the same color.\n2. Each row and column must have an equal number of black and white cells.\n\nLet's represent the initial state of the grid.\nRow 0: *, *, *, b\nRow 1: w, *, *, *\nRow 2: *, b, w, *\nRow 3: b, *, *, w\n\nNow, let's apply the rules to fill the empty cells.\n\n**Step 1:** Analyze row 0. It has one 'b'. It needs one more 'b' and two 'w'.\n**Step 2:** Analyze row 1. It has one 'w'. It needs one more 'w' and two 'b'.\n**Step 3:** Analyze row 2. It has one 'b' and one 'w'. It needs one more 'b' and one more 'w'.\n**Step 4:** Analyze row 3. It has one 'b' and one 'w'. It needs one more 'b' and one more 'w'.\n\n**Step 5:** Analyze column 0. It has one 'w' and one 'b'. It needs one more 'w' and one more 'b'.\n**Step 6:** Analyze column 1. It has one 'b'. It needs one more 'b' and two 'w'.\n**Step 7:** Analyze column 2. It has one 'w'. It needs one more 'w' and two 'b'.\n**Step 8:** Analyze column 3. It has one 'b' and one 'w'. It needs one more 'b' and one more 'w'.\n\nLet's start filling based on the no-three-in-a-row rule.\nIn row 0, the last cell is 'b'. So the cell at (0,2) cannot be 'b'.\nIn row 3, the first cell is 'b'. So the cell at (3,1) cannot be 'b'.\n\nConsider row 0: *, *, *, b. If the second to last cell is 'b', we have *, *, b, b. Then the first two must be 'w'. So w, w, b, b. This is valid.\nConsider row 3: b, *, *, w. If the second cell is 'w', we have b, w, *, w. The third cell must be 'b'. So b, w, b, w. This is valid.\n\nLet's update the grid:\nw, w, b, b\nw, *, *, *\n*, b, w, *\nb, w, b, w\n\nConsider row 1: w, *, *, *. It needs one more 'w' and two 'b'.\nConsider row 2: *, b, w, *. It needs one more 'b' and one more 'w'.\n\nConsider column 1: w, *, b, w. The empty cell must be 'b'.\nConsider column 2: b, *, w, b. The empty cell must be 'w'.\n\nLet's update the grid:\nw, w, b, b\nw, b, w, *\n*, b, w, *\nb, w, b, w\n\nConsider row 1: w, b, w, *. The last cell must be 'b'.\nConsider row 2: *, b, w, *. The last cell must be 'b'.\n\nLet's update the grid:\nw, w, b, b\nw, b, w, b\n*, b, w, b\nb, w, b, w\n\nConsider row 2: *, b, w, b. The first cell must be 'w'.\n\nLet's update the grid:\nw, w, b, b\nw, b, w, b\nw, b, w, b\nb, w, b, w\n\nNow let's check the column counts.\nColumn 0: w, w, w, b (3w, 1b) - Incorrect.\nThere must be an error in the previous steps.\n\nLet's go back to the state:\nw, w, b, b\nw, *, *, *\n*, b, w, *\nb, w, b, w\n\nConsider row 1: w, *, *, *. It needs one more 'w' and two 'b'.\nIf the second cell is 'b', we have w, b, *, *.\nIf the third cell is 'b', we have w, b, b, *. The last cell must be 'w'. So w, b, b, w.\nConsider row 2: *, b, w, *. It needs one more 'b' and one more 'w'.\n\nLet's update the grid:\nw, w, b, b\nw, b, b, w\n*, b, w, *\nb, w, b, w\n\nConsider column 0: w, w, *, b. The empty cell must be 'b'.\nConsider column 3: b, w, *, w. The empty cell must be 'b'.\n\nLet's update the grid:\nw, w, b, b\nw, b, b, w\nb, b, w, b\nb, w, b, w\n\nConsider row 2: b, b, w, b. This violates the no-three-in-a-row rule.\n\nLet's go back to the state:\nw, w, b, b\nw, *, *, *\n*, b, w, *\nb, w, b, w\n\nConsider row 1: w, *, *, *. It needs one more 'w' and two 'b'.\nIf the second cell is 'w', we have w, w, *, *. 
The last two must be 'b'. So w, w, b, b. This is the same as row 0, which is not allowed.\n\nLet's try filling based on column constraints.\nColumn 0: w, *, *, b. It needs one 'w' and one 'b'.\nColumn 1: w, b, b, w. This is valid.\nColumn 2: b, w, w, b. This is valid.\nColumn 3: b, *, *, w. It needs one 'w' and one 'b'.\n\nLet's go back to the initial state:\n*, *, *, b\nw, *, *, *\n*, b, w, *\nb, *, *, w\n\nFrom row 0: *, *, *, b. The cells at (0,0) and (0,1) cannot be 'b'.\nFrom row 3: b, *, *, w. The cell at (3,1) cannot be 'b'.\n\nFrom column 1: *, *, b, *. The cells at (0,1) and (3,1) cannot be 'b'.\nFrom column 2: *, *, w, *. The cells at (0,2) and (3,2) cannot be 'w'.\n\nConsider row 0: *, *, *, b. If the first two are 'w', we have w, w, *, b. The third must be 'b'. So w, w, b, b.\nConsider row 3: b, *, *, w. If the middle two are 'w', we have b, w, w, w. This is invalid.\n\nLet's restart the thought process with the initial grid.\n```json\n{\n\"perception\": [\n[\"*\", \"*\", \"*\", \"b\"],\n[\"w\", \"*\", \"*\", \"*\"],\n[\"*\", \"b\", \"w\", \"*\"],\n[\"b\", \"*\", \"*\", \"w\"]\n],\n\"think\": \"The grid is 4x4. Each row and column must have two 'w' and two 'b'.\\n\\n**Step 1:** Analyze row 0: [*, *, *, b]. To avoid three consecutive 'b', the cells at (0,0) and (0,1) cannot both be 'b'.\\n\\n**Step 2:** Analyze row 3: [b, *, *, w]. To avoid three consecutive 'b', the cell at (3,1) cannot be 'b'.\\n\\n**Step 3:** Analyze column 1: [*, *, b, *]. To avoid three consecutive 'b', the cell at (0,1) cannot be 'b'.\\n\\n**Step 4:** Consider row 0: [*, *, *, b]. If we place 'w' in (0,0) and (0,1), we get [w, w, *, b]. The remaining cell must be 'b' to have two 'b's in the row: [w, w, b, b]. This is valid.\\n\\n**Step 5:** Consider row 3: [b, *, *, w]. If we place 'w' in (3,1), we get [b, w, *, w]. The remaining cell must be 'b': [b, w, b, w]. This is valid.\\n\\n**Step 6:** Update the grid:\\n[w, w, b, b]\\n[w, *, *, *]\\n[*, b, w, *]\\n[b, w, b, w]\\n\\n**Step 7:** Consider column 1: [w, *, b, w]. The missing cell must be 'b': [w, b, b, w].\\n\\n**Step 8:** Consider column 2: [b, *, w, b]. The missing cell must be 'w': [b, w, w, b].\\n\\n**Step 9:** Update the grid:\\n[w, w, b, b]\\n[w, b, w, *]\\n[*, b, w, b]\\n[b, w, b, w]\\n\\n**Step 10:** Consider row 1: [w, b, w, *]. The missing cell must be 'b': [w, b, w, b].\\n\\n**Step 11:** Consider row 2: [*, b, w, b]. The missing cell must be 'w': [w, b, w, b].\\n\\n**Step 12:** Update the grid:\\n[w, w, b, b]\\n[w, b, w, b]\\n[w, b, w, b]\\n[b, w, b, w]\\n\\n**Step 13:** Check column counts:\\nColumn 0: w, w, w, b (Incorrect)\\n\\nLet's backtrack to Step 6 and try a different approach.\\n\\n**Step 6 (Retry):** Consider row 1: [w, *, *, *]. To have two 'w's, let's place 'w' at (1,2). Then we have [w, *, w, *]. The remaining cells must be 'b': [w, b, w, b].\\n\\n**Step 7:** Consider row 2: [*, b, w, *]. To have two 'b's, let's place 'b' at (2,0). Then we have [b, b, w, *]. 
The remaining cell must be 'w': [b, b, w, w].\\n\\n**Step 8:** Update the grid:\\n[w, w, b, b]\\n[w, b, w, b]\\n[b, b, w, w]\\n[b, w, b, w]\\n\\n**Step 9:** Check column counts:\\nColumn 0: w, w, b, b\\nColumn 1: w, b, b, w\\nColumn 2: b, w, w, b\\nColumn 3: b, b, w, w\\nAll columns have two 'w' and two 'b'.\\n\\n**Step 10:** Check row counts:\\nRow 0: w, w, b, b\\nRow 1: w, b, w, b\\nRow 2: b, b, w, w\\nRow 3: b, w, b, w\\nAll rows have two 'w' and two 'b'.\\n\\n**Step 11:** Check for three consecutive same colors.\\nNo three consecutive same colors in any row or column.\",\n\"answer\": [\n[\"w\", \"w\", \"b\", \"b\"],\n[\"w\", \"b\", \"w\", \"b\"],\n[\"b\", \"b\", \"w\", \"w\"],\n[\"b\", \"w\", \"b\", \"w\"]\n]\n}\n```"
},
{
"role": "assistant",
"content": "Perception\n[[0,0,0,b],[w,0,0,0],[0,b,w,0],[b,0,0,w]]\nAnswer\n[[w,w,b,b],[w,b,w,b],[b,b,w,w],[b,w,b,w]]"
},
{
"role": "user",
"content": "Yes. "
}
]
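Binairo is the one config in this batch whose replies use Perception/Answer headings and a three-symbol alphabet (0, w, b). A standalone sketch of that variant follows; `normalize_binairo` is an illustrative name, not the actual filter code.

```python
# Placeholder marks become 0, w/b pass through, and any other symbol
# (e.g. the digits 1, 2, 3 named in the system prompt) yields "Wrong".
EMPTY_MARKS = {"*", "_", "-", "e", "#", "0"}

def normalize_binairo(grid):
    if not isinstance(grid, list) or not grid:
        return "Wrong"
    rows = []
    for row in grid:
        cells = []
        for cell in row:
            cell = str(cell)
            if cell in EMPTY_MARKS:
                cells.append("0")
            elif cell in ("w", "b"):
                cells.append(cell)
            else:
                return "Wrong"
        rows.append("[" + ",".join(cells) + "]")
    return "[" + ",".join(rows) + "]"

print("Perception\n" + normalize_binairo([["*", "*", "w", "w"],
                                          ["b", "b", "*", "w"]]))
# Perception
# [[0,0,w,w],[b,b,0,w]]
```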
VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/fieldexplore/filter_prompt.json
ADDED
@@ -0,0 +1,102 @@
[
{
"role": "system",
"content": "You are a text formatter. Your goal is to extract two parts, perception and answer, from the model's response and put them into a clean format, following the conversation history and the user feedback. The desired format is a 2D array containing only 0, s, or numbers; *, 0, _, -, or e might indicate empty, and you should convert them to 0. If the perception / answer is not a valid solution or does not exist, put a Wrong there. Your goal is only to format the user's input, not to check whether it is correct."
},
{
"role": "user",
"content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"1\", \"*\", \"*\", \"2\"],\n [\"2\", \"*\", \"*\", \"*\"],\n [\"*\", \"s\", \"3\", \"*\"]\n],\n\"think\": \"Based on the given information, we can make the following deductions:\n1. The cell with the value '1' has one mine adjacent to it, which must be the revealed mine cell.\n2. The cell with the value '2' in the second row has two mines adjacent to it. One of them is the revealed mine, so there must be another mine in one of the remaining hidden cells adjacent to it.\n3. The cell with the value '3' has three mines adjacent to it. One of them is the revealed mine, so there must be two more mines in the remaining hidden cells adjacent to it.\n4. The cell with the value '2' in the third row has two mines adjacent to it. One of them is the revealed mine, so there must be another mine in one of the remaining hidden cells adjacent to it.\n5. Based on the constraints of the '2' values, the only possible location for the second mine adjacent to the '2' in the third row is the cell to its left.\n6. With the second mine location determined, the remaining two mines adjacent to the '3' cell must be in the two cells above it.\",\n\"answer\": [\n [\"e\", \"e\", \"s\", \"e\"],\n [\"1\", \"e\", \"e\", \"2\"],\n [\"2\", \"s\", \"e\", \"e\"],\n [\"e\", \"s\", \"3\", \"s\"]\n]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[0,0,0,0],[1,0,0,2],[2,0,0,0],[0,s,3,0]]\nSolution\n[[0,0,s,0],[1,0,0,2],[2,s,0,0],[0,s,3,s]]"
},
{
"role": "user",
"content": "Right"
},
{
"role": "user",
"content": "```json\n{\n \"perception\": [\n [1, 2, \"*\", \"*\", \"*\"],\n [1, \"*\", \"*\", 3, 2],\n [2, 3, \"*\", \"*\", \"*\"],\n [1, \"*\", \"*\", 3, \"*\"]\n ],\n \"think\": \"Let's analyze the grid step by step.\\n\\n1. Look at the '3' at (1, 3). It has at most one hidden mine. If there is a mine at (1, 2), then the '3' at (1, 3) will be satisfied. \\n2. Look at the '2' at (0, 1). It has at most one hidden mine. If there is a mine at (1, 1), then the '2' at (0, 1) will be satisfied. \\n3. Look at the '1' at (0, 0). It has at most one hidden mine. If there is a mine at (1, 0), then the '1' at (0, 0) will be satisfied. \\n4. Look at the '2' at (2, 0). It has at most one hidden mine. If there is a mine at (2, 1), then the '2' at (2, 0) will be satisfied. \\n5. Look at the '3' at (3, 3). It has at most one hidden mine. If there is a mine at (3, 2), then the '3' at (3, 3) will be satisfied. \\n6. Look at the '1' at (3, 0). It has at most one hidden mine. If there is a mine at (3, 1), then the '1' at (3, 0) will be satisfied. \\n7. Considering the above analysis, we can place mines at (1, 1), (1, 2), (2, 1), (3, 1), (3, 2).\",\n \"answer\": [\n [1, 2, \"e\", \"e\", \"e\"],\n [1, \"s\", \"s\", 3, 2],\n [2, 3, \"s\", \"e\", \"e\"],\n [1, \"s\", \"s\", 3, \"e\"]\n ]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[1,2,0,0,0],[1,0,0,3,2],[2,3,0,0,0],[1,0,0,3,0]]\nSolution\n[[1,2,0,0,0],[1,s,s,3,2],[2,3,s,0,0],[1,s,s,3,0]]"
},
{
"role": "user",
"content": "Right"
},
{
"role": "user",
"content": "To solve this problem, we need to analyze the given grid and use the information provided by the numbers to deduce the locations of the mines. \n\nThe grid is a 4x4 grid with some cells already revealed with numbers indicating how many mines are adjacent to them (including diagonals). The goal is to identify the locations of the mines based on this information.\n\nLet's start by examining the revealed cells:\n\n* Cell (0,0) has a value of 1, indicating that there is one mine adjacent to it.\n* Cell (0,1) has a value of 2, indicating that there are two mines adjacent to it.\n* Cell (1,0) has a value of 1, indicating that there is one mine adjacent to it.\n* Cell (1,1) has a value of 3, indicating that there are three mines adjacent to it.\n* Cell (2,2) has a value of 2, indicating that there are two mines adjacent to it.\n* Cell (2,3) has a value of 3, indicating that there are three mines adjacent to it.\n* Cell (3,2) has a value of 2, indicating that there are two mines adjacent to it.\n* Cell (3,3) has a value of 3, indicating that there are three mines adjacent to it.\n\nBased on this information, we can start making deductions about the locations of the mines.\n\n* Since cell (0,0) has a value of 1, it means that there is exactly one mine adjacent to it. The only possible location for this mine is cell (0,1), because if the mine were in any other adjacent cell, it would not be adjacent to cell (0,0).\n* Similarly, since cell (0,1) has a value of 2, it means that there are exactly two mines adjacent to it. One of these mines is already accounted for by cell (0,0), so the other mine must be in cell (1,1).\n* Continuing this process, we can deduce the locations of the remaining mines.\n\nHere is the solution:\n\n```json\n{\"perception\": [[1, 2, 's', '*'], [1, 3, 's', 2], [2, 3, 's', 2], ['*', 3, 3, 's']], \"think\": \"Use the given numbers to deduce the locations of the mines.\", \"answer\": [['e', 'e', 's', 'e'], ['e', 'e', 's', 'e'], ['e', 'e', 's', 'e'], ['e', 'e', 'e', 's']]}\n```\n\nIn the solution, 'e' indicates an empty cell (no mine), and 's' indicates a cell with a mine."
},
{
"role": "assistant",
"content": "Initial State\n[[1,2,s,0],[1,3,s,2],[2,3,s,2],[0,3,3,s]]\nSolution\n[[0,0,s,0],[0,0,s,0],[0,0,s,0],[0,0,0,s]]"
},
{
"role": "user",
"content": "Right"
},
{
"role": "user",
"content": "```json\n{\n \"perception\": [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]\n ],\n \"think\": [\n \"There are no numbers or mines visible in this grid. Since it's a standard game layout, I'll need to start with general reasoning to identify potential mine locations based on the given number clues.\",\n \"Starting with the top left and moving across rows, the number 1 indicates that there is a single mine in one of the four adjacent cells. Similarly, the number 2 suggests a single mine in two of the adjacent cells. The number 3 indicates a single mine in three of the adjacent cells. However, without more information, we cannot pinpoint the exact locations of the mines.\",\n \"To narrow down the possibilities, I'll need to identify the patterns created by the revealed numbers. For example, if two cells have a number 3, we can infer that the mine must be in one of those cells. Similarly, if two cells have a number 1, we can infer that the mine must be in one of those cells. By analyzing the revealed numbers and their patterns, we can make educated guesses about the mine locations.\"\n ],\n \"answer\": [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]\n ]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]"
},
{
"role": "user",
"content": "Right"
},
{
"role": "user",
"content": "```json\n{\n\"perception\": [\n [\"1\", \"2\", \"*\", \"1\", \"*\"],\n [\"*\", \"*\", \"3\", \"*\", \"2\"],\n [\"2\", \"3\", \"3\", \"*\", \"*\"],\n [\"*\", \"*\", \"3\", \"*\", \"*\"]\n],\n\"think\": \"We start by analyzing the revealed numbers. The grid indicates the number of adjacent mines for the cells. We will deduce where the mines must be based on the numbers given:\\n- The cell (0, 0) is 1 and only touches (0, 1), so (0, 1) has a mine, thus confirming 1 mine at (0, 1).\\n- The cell (0, 3) is also 1, and it can only have a mine in (1, 2) since (0, 2) is adjacent to another number.\\n- For cell (1, 2) which is 3, it should have 3 adjacent mines. Currently (0, 1) is already one of them.\\n- The cell (3, 2) is 3; for it to be true, it requires careful placement in surrounding cells (2, 1), (2, 2), and (2, 3) based on existing clues.\\nBy logically deducing, it turns out that the placement of mines leads to the conclusion below:\\n- Cells (0, 1), (1, 2), (2, 1), (2, 2), (2, 3), and (3, 2) are filled with mines. The remaining cells turn out not to have any mines.\",\n\"answer\": [\n [\"e\", \"s\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"s\", \"e\", \"s\"],\n [\"s\", \"s\", \"s\", \"e\", \"e\"],\n [\"e\", \"e\", \"s\", \"e\", \"e\"]\n]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[1,2,0,1,0],[0,0,3,0,2],[2,3,3,0,0],[0,0,3,0,0]]\nSolution\n[[0,s,0,0,0],[0,0,s,0,s],[s,s,s,0,0],[0,0,s,0,0]]"
},
{
"role": "user",
"content": "Right"
},
{
"role": "user",
"content": "```json\n{\n\"perception\": [\n[\"*\", \"2\", \"*\", \"*\"],\n[\"*\", \"s\", \"3\", \"*\"],\n[\"*\", \"*\", \"*\", \"*\"],\n[\"1\", \"s\", \"3\", \"*\"]\n],\n\"think\": \"1. The '2' at [0, 1] has one revealed mine adjacent to it at [1, 1]. Thus, there must be one more mine adjacent to it. Since the other adjacent cells are [0, 0], [0, 2], [1, 0], and [1, 2], and [1, 2] is adjacent to '3' at [1, 2] which already has a mine at [1, 1], the mine must be at [0, 0], [0, 2], or [1, 0].\\n2. The '3' at [1, 2] has one revealed mine adjacent to it at [1, 1]. Thus, there must be two more mines adjacent to it. The adjacent cells are [0, 1], [0, 2], [0, 3], [1, 1], [1, 3], [2, 1], [2, 2], [2, 3]. Since [0, 1] has a '2' and already has a mine adjacent to it, it cannot be a mine. Thus, the two mines must be among [0, 2], [0, 3], [1, 3], [2, 1], [2, 2], [2, 3].\\n3. The '1' at [3, 0] has one revealed mine adjacent to it at [3, 1]. Thus, all other adjacent cells must be empty. \\n4. The '3' at [3, 2] has one revealed mine adjacent to it at [3, 1]. Thus, there must be two more mines adjacent to it. The adjacent cells are [2, 1], [2, 2], [2, 3], [3, 1], [3, 3]. Since [3, 1] is already a mine, the two mines must be among [2, 1], [2, 2], [2, 3], [3, 3].\",\n\"answer\": [\n[\"s\", \"2\", \"s\", \"e\"],\n[\"e\", \"s\", \"3\", \"s\"],\n[\"e\", \"e\", \"s\", \"e\"],\n[\"1\", \"s\", \"3\", \"s\"]\n]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[0,2,0,0],[0,s,3,0],[0,0,0,0],[1,s,3,0]]\nSolution\n[[s,2,s,0],[0,s,3,s],[0,0,s,0],[1,s,3,s]]"
},
{
"role": "user",
"content": "Right"
},
{
"role": "user",
"content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"1\", \"*\", \"*\", \"2\"],\n [\"2\", \"*\", \"*\", \"*\"],\n [\"*\", \"s\", \"3\", \"*\"]\n],\n\"answer\": [\n [\"e\", \"e\", \"s\", \"e\"],\n [\"1\", \"s\", \"s\", \"2\"],\n [\"2\", \"s\", \"s\", \"e\"],\n [\"e\", \"s\", \"3\", \"s\"]\n]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[0,0,0,0],[1,0,0,2],[2,0,0,0],[0,s,3,0]]\nSolution\n[[0,0,s,0],[1,0,0,2],[2,s,s,0],[0,s,3,s]]"
},
{
"role": "user",
"content": "Right"
},
{
"role": "user",
"content": "```json\n{\n\"perception\": [\n [\"1\", \"*\", \"*\", \"1\"],\n [\"1\", \"s\", \"3\", \"*\"],\n [\"2\", \"*\", \"*\", \"*\"],\n [\"1\", \"s\", \"3\", \"*\"]\n],\n\"answer\": [\n [\"e\", \"s\", \"s\", \"e\"],\n [\"e\", \"s\", \"s\", \"s\"],\n [\"e\", \"s\", \"e\", \"e\"],\n [\"e\", \"s\", \"s\", \"s\"]\n]\n}\n```"
},
{
"role": "assistant",
"content": "Initial State\n[[1,0,0,1],[1,s,3,0],[2,0,0,0],[1,s,3,0]]\nSolution\n[[0,s,s,0],[0,s,s,s],[0,s,0,0],[0,s,s,s]]"
},
{
"role": "user",
"content": "Right"
}
]
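Fieldexplore is the odd one out: numeric clue cells are legal grid values, so digits pass through instead of forcing Wrong, and replies use the Initial State/Solution headings. Below is a sketch under those assumptions; the function names are illustrative, not VLMEvalKit's.

```python
EMPTY_MARKS = {"*", "_", "-", "e"}

def normalize_fieldexplore(grid):
    # Unlike the other puzzles, digits are clue cells here and are kept.
    if not isinstance(grid, list) or not grid:
        return "Wrong"
    rows = []
    for row in grid:
        cells = []
        for cell in row:
            cell = str(cell)
            if cell in EMPTY_MARKS:
                cells.append("0")
            elif cell == "s" or cell.isdigit():
                cells.append(cell)
            else:
                return "Wrong"
        rows.append("[" + ",".join(cells) + "]")
    return "[" + ",".join(rows) + "]"

def format_reply(payload: dict) -> str:
    # Assemble the two-heading reply format used by the assistant turns.
    return "Initial State\n{}\nSolution\n{}".format(
        normalize_fieldexplore(payload.get("perception")),
        normalize_fieldexplore(payload.get("answer")))
```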