Datasets: napsternxg/nyt_ingredients
Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: English
Size: 100K<n<1M
License:
File size: 4,329 Bytes
"""New York Times Ingredient Phrase Tagger Dataset"""
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@misc{nytimesTaggedIngredients,
author = {Erica Greene and Adam Mckaig},
title = {{O}ur {T}agged {I}ngredients {D}ata is {N}ow on {G}it{H}ub --- archive.nytimes.com},
howpublished = {\\url{https://archive.nytimes.com/open.blogs.nytimes.com/2016/04/27/structured-ingredients-data-tagging/}},
year = {},
note = {[Accessed 03-10-2023]},
}
"""
_DESCRIPTION = """\
New York Times Ingredient Phrase Tagger Dataset
We use a conditional random field model (CRF) to extract tags from labelled training data, which was tagged by human news assistants.
We wrote about our approach on the [New York Times Open blog](http://open.blogs.nytimes.com/2015/04/09/extracting-structured-data-from-recipes-using-conditional-random-fields/).
This repo contains scripts to extract the Quantity, Unit, Name, and Comments from unstructured ingredient phrases.
We use it on NYT Cooking to format incoming recipes. Given the following input:
```
1 pound carrots, young ones if possible
Kosher salt, to taste
2 tablespoons sherry vinegar
2 tablespoons honey
2 tablespoons extra-virgin olive oil
1 medium-size shallot, peeled and finely diced
1/2 teaspoon fresh thyme leaves, finely chopped
Black pepper, to taste
```
"""
_URL = "https://github.com/nytimes/ingredient-phrase-tagger"
_URLS = {
    "train": "https://huggingface.co/datasets/napsternxg/nyt_ingredients/resolve/main/nyt-ingredients.crf.jsonl"
}
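# The file above is JSON Lines: one JSON object per ingredient phrase, with the
# fields declared in _info() below (input, tokens, per-token features, labels).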
class NYTIngredientsConfig(datasets.BuilderConfig):
    """BuilderConfig for the NYT Ingredients dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for NYTIngredients.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(NYTIngredientsConfig, self).__init__(**kwargs)


class NYTIngredients(datasets.GeneratorBasedBuilder):
    """The NYT Ingredients dataset builder."""

    BUILDER_CONFIGS = [
        NYTIngredientsConfig(
            name="nyt_ingredients",
            version=datasets.Version("1.0.0"),
            description="The NYT Ingredients dataset",
        ),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "input": datasets.Value("string"),
                    "display_input": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Per-token features carried over from the original CRF tagger.
                    "index": datasets.Sequence(datasets.Value("string")),
                    "lengthGroup": datasets.Sequence(datasets.Value("string")),
                    "isCapitalized": datasets.Sequence(datasets.Value("string")),
                    "insideParenthesis": datasets.Sequence(datasets.Value("string")),
                    # BIO tags for quantity, unit, name, comment, and range-end spans.
                    "label": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-COMMENT",
                                "I-COMMENT",
                                "B-NAME",
                                "I-NAME",
                                "B-RANGE_END",
                                "I-RANGE_END",
                                "B-QTY",
                                "I-QTY",
                                "B-UNIT",
                                "I-UNIT",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/nytimes/ingredient-phrase-tagger",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # download_and_extract on a dict of URLs returns a dict of local paths
        # keyed the same way, so the train file can be looked up directly.
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
        ]
    def _generate_examples(self, filepath):
        """Yields one example per JSON line of the downloaded file."""
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as fp:
            for i, line in enumerate(fp):
                yield i, json.loads(line)
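Once the script is on the Hub, the dataset can be loaded with the `datasets` library. A minimal sketch (the repo id is taken from `_URLS` above; `trust_remote_code=True` is only needed on `datasets` releases that gate script-based datasets behind that flag):

```python
from datasets import load_dataset

# Assumes the loading script lives at napsternxg/nyt_ingredients on the Hub.
ds = load_dataset("napsternxg/nyt_ingredients", split="train", trust_remote_code=True)

example = ds[0]
print(example["input"])    # raw ingredient phrase
print(example["tokens"])   # tokenized phrase

# ClassLabel values are stored as integer ids; map them back to tag strings.
label_names = ds.features["label"].feature.names
print([label_names[i] for i in example["label"]])
```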