# latex-formulas.py
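"""Hugging Face `datasets` loading script for the OleehyO/latex-formulas dataset.

Two configurations are defined:

* "raw_formulas"     -- LaTeX formula strings read from a JSONL metadata file.
* "cleaned_formulas" -- (image, LaTeX) pairs extracted from per-directory tar archives.
"""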

import json
from io import BytesIO
from pathlib import Path

import datasets
from PIL import Image

# Source URLs for the two configurations.
RAW_METADATA_URL = r'https://huggingface.co/datasets/OleehyO/latex-formulas/resolve/main/raw_formulas.jsonl'
# DIR_URL = r'https://huggingface.co/datasets/OleehyO/latex-formulas/resolve/main/data.tar.gz'
DIR_URL = r'https://huggingface.co/datasets/OleehyO/latex-formulas/resolve/main/data1.tar.gz'


class LatexFormulasConfig(datasets.BuilderConfig):
    """BuilderConfig that carries the download URL for a given configuration."""

    def __init__(self, data_url, **kwargs):
        super().__init__(**kwargs)
        self.data_url = data_url


class LatexFormulas(datasets.GeneratorBasedBuilder):
    """Builder exposing the "raw_formulas" and "cleaned_formulas" configurations."""

    BUILDER_CONFIGS = [
        LatexFormulasConfig(
            name="raw_formulas",
            data_url=RAW_METADATA_URL
        ),
        LatexFormulasConfig(
            name="cleaned_formulas",
            data_url=DIR_URL
        )
    ]

    def _info(self):
        # "raw_formulas" exposes only the LaTeX source strings.
        if self.config.name == "raw_formulas":
            return datasets.DatasetInfo(
                features=datasets.Features({
                    "latex_formula": datasets.Value("string")
                })
            )
        # "cleaned_formulas" pairs each rendered image with its LaTeX source.
        if self.config.name == "cleaned_formulas":
            return datasets.DatasetInfo(
                features=datasets.Features({
                    "image": datasets.Image(),
                    "latex_formula": datasets.Value("string")
                })
            )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        if self.config.name == 'raw_formulas':
            # Single JSONL file; download it and hand the local path to the generator.
            data_path = dl_manager.download(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_path": data_path
                    }
                )
            ]
        if self.config.name == "cleaned_formulas":
            # Download and extract the archive; the image/metadata directories live
            # under its 'common_formulas' subdirectory.
            dir_path = Path(dl_manager.download_and_extract(self.config.data_url)) / 'common_formulas'
            assert dir_path.is_dir()
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        'dir_path': dir_path,
                        'dl_manager': dl_manager
                    }
                )
            ]

    def _generate_examples(self, data_path=None, dir_path: Path = None, dl_manager=None):
        if self.config.name == 'cleaned_formulas':
            # Each 'process*' directory holds a tar of images plus a JSONL file
            # mapping image ids to tokenized formulas.
            for directory in dir_path.iterdir():
                if not directory.is_dir():
                    continue
                if not directory.name.startswith('process'):
                    continue
                image_path = str(directory / "compressed_img.tar.gz")
                metadata_path = str(directory / "tokenized_finally.jsonl")
                images = dl_manager.iter_archive(image_path)
                # Build the id -> formula lookup first, then stream the image archive.
                img_formula_pair = {}
                with open(metadata_path, 'r', encoding='utf-8') as f:
                    for line in f:
                        single_json = json.loads(line)
                        img_formula_pair[single_json['id']] = single_json['formula']
                for img_path, img_obj in images:
                    img_name = img_path.split('/')[-1]
                    if img_name in img_formula_pair:
                        # Key on directory + archive path so example ids stay unique
                        # across the 'process*' directories.
                        yield str(directory) + img_path, {
                            "image": {"path": img_path, "bytes": img_obj.read()},
                            "latex_formula": img_formula_pair[img_name]
                        }
        if self.config.name == 'raw_formulas':
            assert data_path is not None
            with open(data_path, 'r', encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    yield idx, {
                        "latex_formula": json.loads(line)["formula"]
                    }
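

# Usage sketch (not part of the loading script): assuming this script lives in the
# OleehyO/latex-formulas dataset repository, the configurations above can be loaded
# with `datasets.load_dataset`. The config name and `trust_remote_code` flag below
# are illustrative.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("OleehyO/latex-formulas", "cleaned_formulas", trust_remote_code=True)
#     print(ds["train"][0]["latex_formula"])   # LaTeX source string
#     ds["train"][0]["image"].show()           # corresponding rendered formula (PIL image)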