asahi417 commited on
Commit
7c6a648
1 Parent(s): f7e88b0
README.md ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license:
5
+ - other
6
+ multilinguality:
7
+ - monolingual
8
+ size_categories:
9
+ - 1K<n<10K
10
+ task_categories:
11
+ - token-classification
12
+ task_ids:
13
+ - named-entity-recognition
14
+ pretty_name: MIT Movie
15
+ ---
16
+
17
+ # Dataset Card for "tner/mit_movie_trivia"
18
+
19
+ ## Dataset Description
20
+
21
+ - **Repository:** [T-NER](https://github.com/asahi417/tner)
22
+ - **Dataset:** MIT Movie
23
+ - **Domain:** Movie
24
+ - **Number of Entity:** 12
25
+
26
+ ### Dataset Summary
27
+ MIT Movie NER dataset, formatted as part of the [TNER](https://github.com/asahi417/tner) project.
28
+
29
+ - Entity Types: `Actor`, `Plot`, `Opinion`, `Award`, `Year`, `Genre`, `Origin`, `Director`, `Soundtrack`, `Relationship`, `Character_Name`, `Quote`
30
+
31
+ ## Dataset Structure
32
+
33
+ ### Data Instances
34
+ An example of `train` looks as follows. (NOTE: the example below appears to be copied from the MIT Restaurant dataset — the tokens describe a restaurant query, not a movie query — and should be replaced with a genuine `mit_movie_trivia` instance.)
35
+
36
+ ```
37
+ {
38
+ 'tags': [0, 0, 0, 0, 0, 0, 0, 0, 5, 3, 4, 0],
39
+ 'tokens': ['can', 'you', 'find', 'the', 'phone', 'number', 'for', 'the', 'closest', 'family', 'style', 'restaurant']
40
+ }
41
+ ```
42
+
43
+ ### Label ID
44
+ The label2id dictionary can be found [here](https://huggingface.co/datasets/tner/mit_movie_trivia/raw/main/dataset/label.json).
45
+ ```python
46
+ {
47
+ "O": 0,
48
+ "B-Actor": 1,
49
+ "I-Actor": 2,
50
+ "B-Plot": 3,
51
+ "I-Plot": 4,
52
+ "B-Opinion": 5,
53
+ "I-Opinion": 6,
54
+ "B-Award": 7,
55
+ "I-Award": 8,
56
+ "B-Year": 9,
57
+ "B-Genre": 10,
58
+ "B-Origin": 11,
59
+ "I-Origin": 12,
60
+ "B-Director": 13,
61
+ "I-Director": 14,
62
+ "I-Genre": 15,
63
+ "I-Year": 16,
64
+ "B-Soundtrack": 17,
65
+ "I-Soundtrack": 18,
66
+ "B-Relationship": 19,
67
+ "I-Relationship": 20,
68
+ "B-Character_Name": 21,
69
+ "I-Character_Name": 22,
70
+ "B-Quote": 23,
71
+ "I-Quote": 24
72
+ }
73
+ ```
74
+
75
+ ### Data Splits
76
+
77
+ | name |train|validation|test|
78
+ |---------|----:|---------:|---:|
79
+ |mit_movie_trivia |6900 | 760| 1521|
dataset/label.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"O": 0, "B-Actor": 1, "I-Actor": 2, "B-Plot": 3, "I-Plot": 4, "B-Opinion": 5, "I-Opinion": 6, "B-Award": 7, "I-Award": 8, "B-Year": 9, "B-Genre": 10, "B-Origin": 11, "I-Origin": 12, "B-Director": 13, "I-Director": 14, "I-Genre": 15, "I-Year": 16, "B-Soundtrack": 17, "I-Soundtrack": 18, "B-Relationship": 19, "I-Relationship": 20, "B-Character_Name": 21, "I-Character_Name": 22, "B-Quote": 23, "I-Quote": 24}
dataset/test.json ADDED
The diff for this file is too large to render. See raw diff
 
dataset/train.json ADDED
The diff for this file is too large to render. See raw diff
 
dataset/valid.json ADDED
The diff for this file is too large to render. See raw diff
 
mit_movie_trivia.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ NER dataset compiled by T-NER library https://github.com/asahi417/tner/tree/master/tner """
2
+ import json
3
+ from itertools import chain
4
+ import datasets
5
+
6
+ logger = datasets.logging.get_logger(__name__)
7
+ _DESCRIPTION = """[FIN NER dataset](https://aclanthology.org/U15-1010.pdf)"""
8
+ _NAME = "fin"
9
+ _VERSION = "1.0.0"
10
+ _CITATION = """
11
+ @inproceedings{salinas-alvarado-etal-2015-domain,
12
+ title = "Domain Adaption of Named Entity Recognition to Support Credit Risk Assessment",
13
+ author = "Salinas Alvarado, Julio Cesar and
14
+ Verspoor, Karin and
15
+ Baldwin, Timothy",
16
+ booktitle = "Proceedings of the Australasian Language Technology Association Workshop 2015",
17
+ month = dec,
18
+ year = "2015",
19
+ address = "Parramatta, Australia",
20
+ url = "https://aclanthology.org/U15-1010",
21
+ pages = "84--90",
22
+ }
23
+ """
24
+
25
+ _HOME_PAGE = "https://github.com/asahi417/tner"
26
+ _URL = f'https://huggingface.co/datasets/tner/{_NAME}/raw/main/dataset'
27
+ _URLS = {
28
+ str(datasets.Split.TEST): [f'{_URL}/test.json'],
29
+ str(datasets.Split.TRAIN): [f'{_URL}/train.json'],
30
+ str(datasets.Split.VALIDATION): [f'{_URL}/valid.json'],
31
+ }
32
+


class MitMovieTriviaConfig(datasets.BuilderConfig):
    """BuilderConfig for the MIT Movie trivia NER dataset.

    Renamed from `FinConfig`: the original class names were copy-pasted from
    the FIN dataset loader. With a single DatasetBuilder subclass in the
    module the `datasets` library resolves it by type, and the new name also
    matches the camel-cased module name (`mit_movie_trivia`).
    """

    def __init__(self, **kwargs):
        """BuilderConfig.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(MitMovieTriviaConfig, self).__init__(**kwargs)


class MitMovieTrivia(datasets.GeneratorBasedBuilder):
    """MIT Movie trivia NER dataset: JSON-lines files of tokens + integer tag ids."""

    BUILDER_CONFIGS = [
        MitMovieTriviaConfig(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
    ]

    def _split_generators(self, dl_manager):
        """Download each split's file list and return one SplitGenerator per split."""
        downloaded_file = dl_manager.download_and_extract(_URLS)
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]

    def _generate_examples(self, filepaths):
        """Yield (key, example) pairs.

        Each file is JSON-lines: one `{"tokens": [...], "tags": [...]}` object
        per non-empty line. Keys increment monotonically across all files so
        they stay unique within a split.
        """
        _key = 0
        for filepath in filepaths:
            logger.info(f"generating examples from = {filepath}")
            with open(filepath, encoding="utf-8") as f:
                _list = [i for i in f.read().split('\n') if len(i) > 0]
                for i in _list:
                    data = json.loads(i)
                    yield _key, data
                    _key += 1

    def _info(self):
        """Declare dataset metadata and features (token strings, int32 tag ids)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(datasets.Value("int32")),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )