Upload glm-ocr.py with huggingface_hub
glm-ocr.py (+12 -6)
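Summary of the change: tqdm is dropped from the script's inline dependencies and imports, and the progress bar is replaced with plain logger-based progress reporting; the batch loop now counts batches with enumerate and logs how many images have been processed before each batch, incrementing the counter on both the success and error paths.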
@@ -6,7 +6,6 @@
 # "huggingface-hub",
 # "pillow",
 # "vllm",
-# "tqdm",
 # "toolz",
 # "torch",
 # ]
@@ -58,7 +57,6 @@ from datasets import load_dataset
 from huggingface_hub import DatasetCard, login
 from PIL import Image
 from toolz import partition_all
-from tqdm.auto import tqdm
 from vllm import LLM, SamplingParams
 
 logging.basicConfig(level=logging.INFO)
@@ -291,15 +289,20 @@ def main(
     logger.info(f"Output will be written to column: {output_column}")
 
     all_outputs = []
+    total_batches = (len(dataset) + batch_size - 1) // batch_size
+    processed = 0
 
-    for batch_indices in tqdm(
-        partition_all(batch_size, range(len(dataset))),
-        total=(len(dataset) + batch_size - 1) // batch_size,
-        desc="GLM-OCR processing",
+    for batch_num, batch_indices in enumerate(
+        partition_all(batch_size, range(len(dataset))), 1
     ):
         batch_indices = list(batch_indices)
         batch_images = [dataset[i][image_column] for i in batch_indices]
 
+        logger.info(
+            f"Batch {batch_num}/{total_batches} "
+            f"({processed}/{len(dataset)} images done)"
+        )
+
         try:
             batch_messages = [
                 make_ocr_message(img, task=task)
@@ -312,9 +315,12 @@ def main(
             text = output.outputs[0].text.strip()
             all_outputs.append(text)
 
+            processed += len(batch_images)
+
         except Exception as e:
             logger.error(f"Error processing batch: {e}")
             all_outputs.extend(["[OCR ERROR]"] * len(batch_images))
+            processed += len(batch_images)
 
     processing_duration = datetime.now() - start_time
     processing_time_str = f"{processing_duration.total_seconds() / 60:.1f} min"
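For context, a minimal standalone sketch of the pattern this commit switches to: toolz.partition_all chunks an index range into fixed-size batches, the batch count comes from ceiling division, and a logger call replaces the tqdm bar. The item list and batch size below are illustrative stand-ins, not values from the script.

    import logging
    from toolz import partition_all

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    # Stand-in for the dataset; only len() and indexing matter here.
    items = list(range(10))
    batch_size = 4

    # Ceiling division: 10 items at 4 per batch -> 3 batches.
    total_batches = (len(items) + batch_size - 1) // batch_size
    processed = 0

    # partition_all yields tuples of up to batch_size indices,
    # including a short final batch (here: 4, 4, 2).
    for batch_num, batch in enumerate(
        partition_all(batch_size, range(len(items))), 1
    ):
        batch = list(batch)
        logger.info(
            f"Batch {batch_num}/{total_batches} "
            f"({processed}/{len(items)} items done)"
        )
        # ... process the batch ...
        processed += len(batch)

Unlike a tqdm bar, this prints one log line per batch, which survives redirected or non-interactive output (e.g. job logs on a remote GPU worker).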