parquet-converter committed
Commit 7805134
1 parent: f7347d7

Update parquet files

.gitattributes DELETED
@@ -1,38 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- dialogues.jsonl filter=lfs diff=lfs merge=lfs -text
 
2ch_b_dialogues.py DELETED
@@ -1,133 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # TODO: Address all TODOs and remove all explanatory comments
- """TODO: Add a description here."""
-
-
- import csv
- import json
- import os
-
- import datasets
-
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {2ch b dialogues},
- author={black_samorez},
- year={2022}
- }
- """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- Dialogues built from 2ch.hk/b/ threads
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = "2ch.hk/b/"
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = ""
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URLS = {
-     "dialogues": "dialogues.zip",
- }
-
-
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
- class DvaChBDialogues(datasets.GeneratorBasedBuilder):
-     """TODO: Short description of my dataset."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="dialogues", version=VERSION, description="Collected with collect.py in May 2022"),
-     ]
-
-     DEFAULT_CONFIG_NAME = "dialogues"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
-
-     def _info(self):
-         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
-         features = datasets.Features(
-             {
-                 "dialogue": datasets.Sequence(datasets.Value("string"))
-                 # These are the features of your dataset like images, labels ...
-             }
-         )
-
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
-         # It can accept any type or nested list/dict and will give back the same structure with the URL replaced with a path to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-         urls = _URLS[self.config.name]
-         data_dir = dl_manager.download_and_extract(urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "dialogues.jsonl"),
-                     "split": "train",
-                 },
-             ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         with open(filepath, encoding="utf-8") as f:
-             for key, row in enumerate(f):
-                 data = json.loads(row)
-                 # Yields examples as (key, example) tuples
-                 yield key, {
-                     "dialogue": data["dialogue"],
-                 }
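Before this commit, loading the dataset executed the script above, which downloaded dialogues.zip and streamed dialogues.jsonl. A minimal usage sketch, assuming the Hub repo id black_samorez/2ch_b_dialogues (inferred from the citation above, not stated in this commit):

from datasets import load_dataset

# Repo id is an assumption inferred from the dataset card's citation.
ds = load_dataset("black_samorez/2ch_b_dialogues", "dialogues", split="train")

# Each example matches the features declared in _info():
print(ds[0]["dialogue"])  # a list of strings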
 
README.md DELETED
@@ -1,139 +0,0 @@
- ---
- annotations_creators:
- - no-annotation
- language_creators:
- - found
- language:
- - ru
- license: []
- multilinguality:
- - monolingual
- pretty_name: Dialogues mined from 2ch/b/.
- size_categories:
- - 10K<n<100K
- source_datasets:
- - original
- task_categories:
- - conversational
- task_ids:
- - dialogue-generation
- ---
-
- # Dataset Card for 2ch_b_dialogues
-
- ## Table of Contents
- - [Dataset Description](#dataset-description)
-   - [Dataset Summary](#dataset-summary)
-   - [Supported Tasks](#supported-tasks-and-leaderboards)
-   - [Languages](#languages)
- - [Dataset Structure](#dataset-structure)
-   - [Data Instances](#data-instances)
-   - [Data Fields](#data-fields)
-   - [Data Splits](#data-splits)
- - [Dataset Creation](#dataset-creation)
-   - [Curation Rationale](#curation-rationale)
-   - [Source Data](#source-data)
-   - [Annotations](#annotations)
-   - [Personal and Sensitive Information](#personal-and-sensitive-information)
- - [Considerations for Using the Data](#considerations-for-using-the-data)
-   - [Social Impact of Dataset](#social-impact-of-dataset)
-   - [Discussion of Biases](#discussion-of-biases)
-   - [Other Known Limitations](#other-known-limitations)
- - [Additional Information](#additional-information)
-   - [Dataset Curators](#dataset-curators)
-   - [Licensing Information](#licensing-information)
-   - [Citation Information](#citation-information)
-
- ## Dataset Description
-
- - **Homepage:** https://github.com/BlackSamorez/ebanko
- - **Repository:** [Needs More Information]
- - **Paper:** [Needs More Information]
- - **Leaderboard:** [Needs More Information]
- - **Point of Contact:** [Needs More Information]
-
- ### Dataset Summary
-
- Russian-language dialogues mined from 2ch.hk/b/
-
- ### Supported Tasks and Leaderboards
-
- [Needs More Information]
-
- ### Languages
-
- Russian
-
- ## Dataset Structure
-
- ### Data Instances
-
- {
-     "dialogue": ["Glad to hear!", "Fine, thank you!", "Hi, how are you?"]
- }
-
- ### Data Fields
-
- - dialogue: a list of posts, ordered last-to-first (most recent reply first)
-
- ### Data Splits
-
- [Needs More Information]
-
- ## Dataset Creation
-
- ### Curation Rationale
-
- Fun
-
- ### Source Data
-
- #### Initial Data Collection and Normalization
-
- In each thread's reply graph, only vertices with a single parent were kept; non-overlapping chains of such posts were then assembled into dialogues.
-
- #### Who are the source language producers?
-
- 2ch.hk/b/ users
-
- ### Annotations
-
- #### Annotation process
-
- [Needs More Information]
-
- #### Who are the annotators?
-
- [Needs More Information]
-
- ### Personal and Sensitive Information
-
- [Needs More Information]
-
- ## Considerations for Using the Data
-
- ### Social Impact of Dataset
-
- Morally questionable data
-
- ### Discussion of Biases
-
- [Needs More Information]
-
- ### Other Known Limitations
-
- [Needs More Information]
-
- ## Additional Information
-
- ### Dataset Curators
-
- blacks_samorez
-
- ### Licensing Information
-
- [Needs More Information]
-
- ### Citation Information
-
- [Needs More Information]
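Note that the dialogue field stores posts last-to-first, so chronological order requires a reversal; a minimal sketch built on the card's own example instance:

# Example instance from the card above; posts are stored newest-first.
example = {"dialogue": ["Glad to hear!", "Fine, thank you!", "Hi, how are you?"]}

# Reverse to obtain chronological order for display or training.
chronological = list(reversed(example["dialogue"]))
print(chronological)  # ['Hi, how are you?', 'Fine, thank you!', 'Glad to hear!']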
 
collect.py DELETED
@@ -1,129 +0,0 @@
- import pandas as pd
- import numpy as np
- from tqdm import tqdm, trange
- import json
- from time import sleep
-
- import api2ch
- import html2text
- import re
- import os
-
- API = api2ch.DvachApi('b')
-
- if os.path.exists("parsed_threads.json"):
-     with open("parsed_threads.json", "r") as file:
-         PARSED_TREADS = set(json.load(file)["ids"])
- else:
-     PARSED_TREADS = set()
-
- REPLY_RE = re.compile(">>[0-9]+")
-
- HTML_STRIPPER = html2text.HTML2Text()
- HTML_STRIPPER.ignore_links = True
- HTML_STRIPPER.ignore_images = True
- HTML_STRIPPER.ignore_emphasis = True
-
- def SaveParsed():
-     with open("parsed_threads.json", "w") as file:
-         json.dump({"ids": list(PARSED_TREADS)}, file)
-
- def ThreadGood(thread):
-     return thread.reply_count > 20
-
- def AppendDialogues(id, dialogues):
-     with open("dialogues.jsonl", "a") as file:
-         for dialogue in dialogues:
-             json.dump({"id": id, "dialogue": dialogue}, file, ensure_ascii=False)
-             file.write("\n")
-
- def FilterAndPrepareText(text):
-     text = HTML_STRIPPER.handle(text)
-     text = text.lower()
-     text = text.replace("\n", " ")
-     text = text.replace("(op)", "")
-     if text.find("бамп") != -1:  # drop bump posts
-         return
-     text = re.sub(' +', ' ', text)
-     text = re.sub('^ ', '', text)
-     text = re.sub(' $', '', text)
-     return text
-
- class Post:
-     def __init__(self, idx: int, text: str):
-         self.idx = idx
-
-         self.parents = []
-         while True:
-             reply_match = REPLY_RE.match(text)
-             if reply_match is not None:
-                 parent_id = int(reply_match.group(0)[2:])
-                 span = reply_match.span()
-                 self.parents.append(parent_id)
-                 text = text[:span[0]] + text[span[1]:]
-                 text = re.sub('^ ', '', text)
-                 text = re.sub(' $', '', text)
-             else:
-                 break
-         self.text = text
-
-         self.children = []
-         self.dialogue = [self.text]
-         self.appeared = False
-
-     def __repr__(self):
-         return {"idx": self.idx, "parents": self.parents, "text": self.text}.__repr__()
-
-     def build_random_dialogue(self, context):
-         self.appeared = True
-         if len(self.dialogue) != 1:
-             return self.dialogue
-         if len(self.parents) == 0:
-             return self.dialogue
-         chosen_parent = self.parents[0]
-         if chosen_parent in context.keys():
-             self.dialogue.extend(context[chosen_parent].build_random_dialogue(context))
-         return self.dialogue
-
- def BuildDialogues(thread):
-     posts = {}
-     for post in thread:
-         idx = post.num
-         text = FilterAndPrepareText(post.comment)
-         if text is not None:
-             posts[idx] = Post(idx, text)
-
-     for _, post in reversed(posts.items()):
-         if not post.appeared:
-             post.build_random_dialogue(posts)
-
-     return [post.dialogue for post in list(posts.values()) if len(post.dialogue) > 1]
-
- def main():
-     print("Started collecting...")
-     while True:
-         while True:
-             try:
-                 board = API.get_board()
-                 break
-             except Exception:
-                 print("Failed to fetch board. Sleeping for 60s")
-                 sleep(60)
-         print("Got board")
-
-         for thread in board:
-             if thread.num not in PARSED_TREADS and ThreadGood(thread):
-                 PARSED_TREADS.add(thread.num)
-                 try:
-                     thread = API.get_thread(thread.num)
-                 except Exception:
-                     continue
-                 dialogues = BuildDialogues(thread)
-                 AppendDialogues(thread[0].num, dialogues)
-                 print("Parsed")
-         SaveParsed()
-         print("Saved. Sleeping for 10m")
-         sleep(600)
-
- if __name__ == "__main__":
-     main()
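The heart of the collector is the reply parsing in Post.__init__ above: leading ">>12345" markers are peeled off the post body and recorded as parent post ids, which later lets build_random_dialogue walk up the reply graph. A self-contained sketch of that loop (split_parents is a hypothetical helper mirroring the constructor's logic):

import re

REPLY_RE = re.compile(">>[0-9]+")

def split_parents(text):
    # Strip leading ">>12345" reply markers one by one, collecting the
    # referenced parent ids, until only the message body remains.
    parents = []
    while True:
        match = REPLY_RE.match(text)
        if match is None:
            break
        parents.append(int(match.group(0)[2:]))
        text = text[match.end():].lstrip(" ")
    return parents, text.rstrip(" ")

print(split_parents(">>100 >>101 how are you"))  # ([100, 101], 'how are you')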
 
dialogues.jsonl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e45c097dc98f53fe8d2cfc05308631aba88e0c58b337f8232f38733a1c917985
- size 112897459
 
dialogues.zip → dialogues/2ch_b_dialogues-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1fea10f1be18917763b343fa6b34fab93c3b93723301358875d909f0133bed90
- size 11359216
+ oid sha256:19b53b4d6e675f112716c8f7c372848a600dd7380a13fe6f5ef3e434f7a284cf
+ size 10979948
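With this commit the data ships as a Parquet file rather than a loading script plus dialogues.zip. A minimal sketch of reading the converted split directly, assuming the LFS file has been materialized locally (e.g. via git lfs pull):

import pandas as pd

# Path mirrors the renamed file in this commit.
df = pd.read_parquet("dialogues/2ch_b_dialogues-train.parquet")
print(df["dialogue"].iloc[0])  # one dialogue: a list of posts, last-to-first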