Add convert_wang.py
convert_wang.py +80 -0
convert_wang.py
ADDED
@@ -0,0 +1,80 @@
import os
import pandas as pd
from PIL import Image
import codecs
import numpy as np
import glob
import io

def create_dataset():
    print("Starting dataset creation...")

    # Create output directory for images
    os.makedirs("processed_images", exist_ok=True)

    # Process wang dataset
    print("\nProcessing wang dataset...")
    wang_csv = "./original/wang/free_dataset.csv"
    if os.path.exists(wang_csv):
        df_wang = pd.read_csv(wang_csv, header=None)
        print(f"Found {len(df_wang)} entries in wang dataset")

        data = []
        file_count = 1

        for i, row in df_wang.iterrows():
            if i % 100 == 0:
                print(f"Processing wang entry {i+1}/{len(df_wang)}")

            _, text, _, filename = row
            image_path = os.path.join("/Users/kobkrit/git/iapp-dataset/thai_handwriting_dataset/original/wang/free_dataset", filename)

            if os.path.exists(image_path):
                try:
                    img = Image.open(image_path)

                    # Convert image to PNG format
                    if img.format != 'PNG':
                        # Create a new RGB image with white background
                        png_img = Image.new('RGB', img.size, (255, 255, 255))
                        # Paste the original image onto the white background
                        png_img.paste(img, mask=img if img.mode == 'RGBA' else None)
                        img = png_img

                    img_byte_arr = io.BytesIO()
                    img.save(img_byte_arr, format='PNG')
                    img_bytes = {"bytes": bytearray(img_byte_arr.getvalue())}

                    data.append({
                        'image': img_bytes,
                        'text': text,
                        'label_file': filename
                    })

                    # print(data)

                    # Save every 100 rows
                    if len(data) >= 100:
                        print(f"\nSaving batch {file_count} with {len(data)} images")
                        print("Converting to dataframe...")
                        df = pd.DataFrame(data)
                        print(f"Saving to parquet file train-{file_count:04d}.parquet...")
                        df.to_parquet(f"train-{file_count:04d}.parquet", index=False)
                        data = []  # Clear the data list
                        file_count += 1

                except Exception as e:
                    print(f"Error processing wang image {image_path}: {e}")

        # Save any remaining data
        if len(data) > 0:
            print(f"\nSaving final batch {file_count} with {len(data)} images")
            print("Converting to dataframe...")
            df = pd.DataFrame(data)
            print(f"Saving to parquet file train-{file_count:04d}.parquet...")
            df.to_parquet(f"train-{file_count:04d}.parquet", index=False)

    print("Dataset creation complete!")

if __name__ == "__main__":
    create_dataset()
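For a quick sanity check of the shards the script writes, one could read a parquet file back and decode a single image. This is a minimal sketch, not part of the committed script; it assumes the train-XXXX.parquet naming used above and that pandas (with the pyarrow engine) round-trips the image struct as a per-row dict with a "bytes" field.

import io
import pandas as pd
from PIL import Image

# Read the first shard produced by convert_wang.py (assumed filename)
df = pd.read_parquet("train-0001.parquet")
print(f"{len(df)} rows, columns: {list(df.columns)}")

# Decode the first image from its stored PNG bytes and show its label text
row = df.iloc[0]
img = Image.open(io.BytesIO(row["image"]["bytes"]))
print(row["text"], img.size)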