autotrain-data-processor committed on
Commit
5ca2d71
1 Parent(s): 0f336a3

Processed data from AutoTrain data processor (2022-07-07 07:02)

README.md ADDED
@@ -0,0 +1,92 @@
+ ---
+ {}
+
+ ---
+ # AutoTrain Dataset for project: ZuoZhuan
+
+ ## Dataset Description
+
+ This dataset has been automatically processed by AutoTrain for project ZuoZhuan.
+
+ ### Languages
+
+ The BCP-47 code for the dataset's language is unk.
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ A sample from this dataset looks as follows:
+
+ ```json
+ [
+ {
+ "tokens": [
+ "\u4e09",
+ "\u5de1",
+ "\u6578",
+ "\u4e4b",
+ "\u3002"
+ ],
+ "tags": [
+ 6,
+ 23,
+ 23,
+ 15,
+ 24
+ ]
+ },
+ {
+ "tokens": [
+ "\u9042",
+ "\u6b78",
+ "\uff0c",
+ "\u5fa9",
+ "\u547d",
+ "\uff0c",
+ "\u800c",
+ "\u81ea",
+ "\u62d8",
+ "\u65bc",
+ "\u53f8",
+ "\u6557",
+ "\u3002"
+ ],
+ "tags": [
+ 3,
+ 23,
+ 24,
+ 23,
+ 8,
+ 24,
+ 2,
+ 15,
+ 23,
+ 13,
+ 8,
+ 8,
+ 24
+ ]
+ }
+ ]
+ ```
+
+ ### Dataset Fields
+
+ The dataset has the following fields (also called "features"):
+
+ ```json
+ {
+ "tokens": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)",
+ "tags": "Sequence(feature=ClassLabel(num_classes=28, names=['/a', '/b', '/c', '/d', '/f', '/j', '/m', '/mr', '/n', '/nn', '/nr', '/ns', '/nsr', '/p', '/q', '/r', '/rn', '/rr', '/rs', '/s', '/sv', '/t', '/u', '/v', '/w', '/wv', '/y', '/yv'], id=None), length=-1, id=None)"
+ }
+ ```
+
+ ### Dataset Splits
+
+ This dataset is split into a train and validation split. The split sizes are as follows:
+
+ | Split name | Num samples |
+ | ------------ | ------------------- |
+ | train | 5836 |
+ | valid | 2860 |
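As a quick illustration of the tag encoding described in the card above (not part of the generated README), the integer tags can be mapped back to the part-of-speech labels listed under "Dataset Fields" with a `datasets.ClassLabel`. The label names and the sample tag ids below are copied from the card; the decoding snippet itself is only a sketch.

```python
# Sketch: decode the first sample's integer tags back to POS labels.
# Label names are copied verbatim from the "Dataset Fields" section.
from datasets import ClassLabel

pos_labels = ClassLabel(names=[
    "/a", "/b", "/c", "/d", "/f", "/j", "/m", "/mr", "/n", "/nn",
    "/nr", "/ns", "/nsr", "/p", "/q", "/r", "/rn", "/rr", "/rs",
    "/s", "/sv", "/t", "/u", "/v", "/w", "/wv", "/y", "/yv",
])

sample_tags = [6, 23, 23, 15, 24]  # tags of the first sample above
print([pos_labels.int2str(t) for t in sample_tags])
# -> ['/m', '/v', '/v', '/r', '/w']
```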
processed/dataset_dict.json ADDED
@@ -0,0 +1 @@
+ {"splits": ["train", "valid"]}
processed/train/dataset.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:443afd4b61de6287fc5e6adcbab265f6980d0100679a50c034ad40bd64c93db9
+ size 1977600
processed/train/dataset_info.json ADDED
@@ -0,0 +1,77 @@
+ {
+ "builder_name": null,
+ "citation": "",
+ "config_name": null,
+ "dataset_size": null,
+ "description": "AutoTrain generated dataset",
+ "download_checksums": null,
+ "download_size": null,
+ "features": {
+ "tokens": {
+ "feature": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "length": -1,
+ "id": null,
+ "_type": "Sequence"
+ },
+ "tags": {
+ "feature": {
+ "num_classes": 28,
+ "names": [
+ "/a",
+ "/b",
+ "/c",
+ "/d",
+ "/f",
+ "/j",
+ "/m",
+ "/mr",
+ "/n",
+ "/nn",
+ "/nr",
+ "/ns",
+ "/nsr",
+ "/p",
+ "/q",
+ "/r",
+ "/rn",
+ "/rr",
+ "/rs",
+ "/s",
+ "/sv",
+ "/t",
+ "/u",
+ "/v",
+ "/w",
+ "/wv",
+ "/y",
+ "/yv"
+ ],
+ "id": null,
+ "_type": "ClassLabel"
+ },
+ "length": -1,
+ "id": null,
+ "_type": "Sequence"
+ }
+ },
+ "homepage": "",
+ "license": "",
+ "post_processed": null,
+ "post_processing_size": null,
+ "size_in_bytes": null,
+ "splits": {
+ "train": {
+ "name": "train",
+ "num_bytes": 1974827,
+ "num_examples": 5836,
+ "dataset_name": null
+ }
+ },
+ "supervised_keys": null,
+ "task_templates": null,
+ "version": null
+ }
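For reference (an illustration, not something the processor emits), the feature schema recorded in this dataset_info.json corresponds to the following declaration with the datasets library:

```python
# Sketch: the same tokens/tags schema as dataset_info.json, declared in code.
from datasets import ClassLabel, Features, Sequence, Value

features = Features({
    "tokens": Sequence(Value("string")),
    "tags": Sequence(ClassLabel(names=[
        "/a", "/b", "/c", "/d", "/f", "/j", "/m", "/mr", "/n", "/nn",
        "/nr", "/ns", "/nsr", "/p", "/q", "/r", "/rn", "/rr", "/rs",
        "/s", "/sv", "/t", "/u", "/v", "/w", "/wv", "/y", "/yv",
    ])),
})
```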
processed/train/state.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "_data_files": [
+ {
+ "filename": "dataset.arrow"
+ }
+ ],
+ "_fingerprint": "6fea55cfa1c700b8",
+ "_format_columns": [
+ "tags",
+ "tokens"
+ ],
+ "_format_kwargs": {},
+ "_format_type": null,
+ "_indexes": {},
+ "_output_all_columns": false,
+ "_split": null
+ }
processed/valid/dataset.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52b5d838cfb001f6337e51463d99b61152b6b6d1f2edb314f4ca04d4810cb22f
+ size 1019176
processed/valid/dataset_info.json ADDED
@@ -0,0 +1,77 @@
+ {
+ "builder_name": null,
+ "citation": "",
+ "config_name": null,
+ "dataset_size": null,
+ "description": "AutoTrain generated dataset",
+ "download_checksums": null,
+ "download_size": null,
+ "features": {
+ "tokens": {
+ "feature": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "length": -1,
+ "id": null,
+ "_type": "Sequence"
+ },
+ "tags": {
+ "feature": {
+ "num_classes": 28,
+ "names": [
+ "/a",
+ "/b",
+ "/c",
+ "/d",
+ "/f",
+ "/j",
+ "/m",
+ "/mr",
+ "/n",
+ "/nn",
+ "/nr",
+ "/ns",
+ "/nsr",
+ "/p",
+ "/q",
+ "/r",
+ "/rn",
+ "/rr",
+ "/rs",
+ "/s",
+ "/sv",
+ "/t",
+ "/u",
+ "/v",
+ "/w",
+ "/wv",
+ "/y",
+ "/yv"
+ ],
+ "id": null,
+ "_type": "ClassLabel"
+ },
+ "length": -1,
+ "id": null,
+ "_type": "Sequence"
+ }
+ },
+ "homepage": "",
+ "license": "",
+ "post_processed": null,
+ "post_processing_size": null,
+ "size_in_bytes": null,
+ "splits": {
+ "valid": {
+ "name": "valid",
+ "num_bytes": 1017392,
+ "num_examples": 2860,
+ "dataset_name": null
+ }
+ },
+ "supervised_keys": null,
+ "task_templates": null,
+ "version": null
+ }
processed/valid/state.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "_data_files": [
+ {
+ "filename": "dataset.arrow"
+ }
+ ],
+ "_fingerprint": "729e9e9c0b2c998e",
+ "_format_columns": [
+ "tags",
+ "tokens"
+ ],
+ "_format_kwargs": {},
+ "_format_type": null,
+ "_indexes": {},
+ "_output_all_columns": false,
+ "_split": null
+ }