AndrewMcDowell
committed
Commit 6bd1c39 • 1 Parent(s): 561be41
Training in progress, step 1000
Browse files
- .ipynb_checkpoints/eval-checkpoint.py +132 -0
- .ipynb_checkpoints/eval_results-checkpoint.json +9 -0
- .ipynb_checkpoints/log_mozilla-foundation_common_voice_8_0_ja_test_predictions-checkpoint.txt +0 -0
- .ipynb_checkpoints/log_mozilla-foundation_common_voice_8_0_ja_test_targets-checkpoint.txt +0 -0
- .ipynb_checkpoints/run_training-checkpoint.sh +1 -1
- .ipynb_checkpoints/speech_training_notebook-checkpoint.ipynb +92 -0
- pytorch_model.bin +1 -1
- run_training.sh +1 -1
- special_tokens_map.json +1 -1
- speech_training_notebook.ipynb +59 -0
- training_args.bin +1 -1
.ipynb_checkpoints/eval-checkpoint.py
ADDED
@@ -0,0 +1,132 @@
#!/usr/bin/env python3
from datasets import load_dataset, load_metric, Audio, Dataset
from transformers import pipeline, AutoFeatureExtractor
import re
import argparse
import unicodedata
from typing import Dict


def log_results(result: Dataset, args: Dict[str, str]):
    """ DO NOT CHANGE. This function computes and logs the result metrics. """

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metrics
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = (
        f"WER: {wer_result}\n"
        f"CER: {cer_result}"
    )
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:

            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """ DO ADAPT FOR YOUR USE CASE. This function normalizes the target text. """

    from pykakasi import kakasi

    kakasi = kakasi()
    kakasi.setMode('J', 'H')  # convert from kanji to hiragana
    conv = kakasi.getConverter()
    chars_to_ignore_regex = '[\,\?\!\-\;\:\"\“\%\‘\”\�\—\’\…\–\！\？\[\]\)\(\）]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = text.lower()
    # normalize non-standard (stylized) unicode characters
    text = unicodedata.normalize('NFKC', text)
    # remove punctuation
    text = conv.do(re.sub(chars_to_ignore_regex, "", text))

    # Let's also make sure we split on all kinds of newlines, spaces, etc...
    text = " ".join(text.split())

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    asr = pipeline("automatic-speech-recognition", model=args.model_id)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets"
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument(
        "--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`"
    )
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to None. For long audio files a good value would be 5.0 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to None. For long audio files a good value would be 1.0 seconds."
    )
    parser.add_argument(
        "--log_outputs", action='store_true', help="If defined, write outputs to log file for analysis."
    )
    args = parser.parse_args()

    main(args)
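Judging by the prediction/target log file names added in this commit, the script above was run with `--dataset mozilla-foundation/common_voice_8_0 --config ja --split test --log_outputs` (the exact `--model_id` is not recorded in this diff). As a minimal sketch of what the Japanese-specific `normalize_text` step does, assuming pykakasi's legacy `setMode`/`getConverter` API and an illustrative input sentence:

# Illustrative only: mirrors the kanji -> hiragana normalization in
# eval-checkpoint.py on a made-up sentence (not a dataset sample).
import re
import unicodedata

from pykakasi import kakasi

k = kakasi()
k.setMode('J', 'H')  # convert kanji to hiragana, as in normalize_text
conv = k.getConverter()

text = "水を、飲みますか？"
text = unicodedata.normalize('NFKC', text.lower())  # folds full-width forms
text = re.sub('[、。!?！？]', '', text)  # simplified ignore list for this sketch
text = conv.do(" ".join(text.split()))
print(text)  # roughly: みずをのみますか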
.ipynb_checkpoints/eval_results-checkpoint.json
ADDED
@@ -0,0 +1,9 @@
{
    "epoch": 10.0,
    "eval_loss": 0.9563776254653931,
    "eval_runtime": 234.4791,
    "eval_samples": 4483,
    "eval_samples_per_second": 19.119,
    "eval_steps_per_second": 2.393,
    "eval_wer": 2.4134786476868326
}
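A quick sanity check on these figures (not part of the commit): the throughput fields are mutually consistent. The eval_wer above 1.0 suggests word-level WER is not very meaningful for unsegmented Japanese targets, which is presumably why the eval script also reports CER.

# Sanity check: samples / runtime reproduces samples_per_second.
eval_samples = 4483
eval_runtime = 234.4791  # seconds
print(round(eval_samples / eval_runtime, 3))  # 19.119, matching the JSON above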
.ipynb_checkpoints/log_mozilla-foundation_common_voice_8_0_ja_test_predictions-checkpoint.txt
ADDED
The diff for this file is too large to render.
See raw diff
.ipynb_checkpoints/log_mozilla-foundation_common_voice_8_0_ja_test_targets-checkpoint.txt
ADDED
The diff for this file is too large to render.
See raw diff
.ipynb_checkpoints/run_training-checkpoint.sh
CHANGED
@@ -4,7 +4,7 @@ python run_speech_recognition_ctc_bnb.py \
     --dataset_config_name="ja" \
     --output_dir="./" \
     --overwrite_output_dir \
-    --num_train_epochs="
+    --num_train_epochs="50" \
     --per_device_train_batch_size="48" \
     --per_device_eval_batch_size="8" \
     --learning_rate="7.5e-5" \
.ipynb_checkpoints/speech_training_notebook-checkpoint.ipynb
CHANGED
@@ -327,6 +327,98 @@
     "common_voice_test = common_voice_test.map(remove_special_characters)"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 24,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'client_id': '02a8841a00d762472a4797b56ee01643e8d9ece5a225f2e91c007ab1f94c49c99e50d19986ff3fefb18190257323f34238828114aa607f84fbe9764ecf5aaeaa',\n",
+       " 'path': 'cv-corpus-8.0-2022-01-19/ja/clips/common_voice_ja_25467658.mp3',\n",
+       " 'audio': {'path': 'cv-corpus-8.0-2022-01-19/ja/clips/common_voice_ja_25467658.mp3',\n",
+       " 'array': array([0.        , 0.        , 0.        , ..., 0.00026336, 0.00038834,\n",
+       "        0.00026771], dtype=float32),\n",
+       " 'sampling_rate': 48000},\n",
+       " 'sentence': 'γ‘γγ£γ¨γγ£γγγ§γγ©γγ«γγγγΎγγ¦γ',\n",
+       " 'up_votes': 2,\n",
+       " 'down_votes': 0,\n",
+       " 'age': 'fourties',\n",
+       " 'gender': 'female',\n",
+       " 'accent': '',\n",
+       " 'locale': 'ja',\n",
+       " 'segment': ''}"
+      ]
+     },
+     "execution_count": 24,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "common_voice_train[1]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Requirement already satisfied: datasets in /opt/conda/lib/python3.8/site-packages (1.18.2.dev0)\n",
+      "Collecting datasets\n",
+      "  Downloading datasets-1.18.3-py3-none-any.whl (311 kB)\n",
+      "     |████████████████████████████████| 311 kB 11.0 MB/s \n",
+      "\u001b[?25hRequirement already satisfied: aiohttp in /opt/conda/lib/python3.8/site-packages (from datasets) (3.8.1)\n",
+      "Requirement already satisfied: huggingface-hub<1.0.0,>=0.1.0 in /opt/conda/lib/python3.8/site-packages (from datasets) (0.4.0)\n",
+      "Requirement already satisfied: dill in /opt/conda/lib/python3.8/site-packages (from datasets) (0.3.4)\n",
+      "Requirement already satisfied: xxhash in /opt/conda/lib/python3.8/site-packages (from datasets) (2.0.2)\n",
+      "Requirement already satisfied: multiprocess in /opt/conda/lib/python3.8/site-packages (from datasets) (0.70.12.2)\n",
+      "Requirement already satisfied: pandas in /opt/conda/lib/python3.8/site-packages (from datasets) (1.4.0)\n",
+      "Requirement already satisfied: pyarrow!=4.0.0,>=3.0.0 in /opt/conda/lib/python3.8/site-packages (from datasets) (6.0.1)\n",
+      "Requirement already satisfied: requests>=2.19.0 in /opt/conda/lib/python3.8/site-packages (from datasets) (2.24.0)\n",
+      "Requirement already satisfied: fsspec[http]>=2021.05.0 in /opt/conda/lib/python3.8/site-packages (from datasets) (2022.1.0)\n",
+      "Requirement already satisfied: numpy>=1.17 in /opt/conda/lib/python3.8/site-packages (from datasets) (1.19.2)\n",
+      "Requirement already satisfied: tqdm>=4.62.1 in /opt/conda/lib/python3.8/site-packages (from datasets) (4.62.3)\n",
+      "Requirement already satisfied: packaging in /opt/conda/lib/python3.8/site-packages (from datasets) (21.3)\n",
+      "Requirement already satisfied: filelock in /opt/conda/lib/python3.8/site-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (3.0.12)\n",
+      "Requirement already satisfied: pyyaml in /opt/conda/lib/python3.8/site-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (5.4.1)\n",
+      "Requirement already satisfied: typing-extensions>=3.7.4.3 in /opt/conda/lib/python3.8/site-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (4.0.1)\n",
+      "Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.8/site-packages (from packaging->datasets) (3.0.7)\n",
+      "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /opt/conda/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (1.25.11)\n",
+      "Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (2020.12.5)\n",
+      "Requirement already satisfied: chardet<4,>=3.0.2 in /opt/conda/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (3.0.4)\n",
+      "Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (2.10)\n",
+      "Requirement already satisfied: multidict<7.0,>=4.5 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (6.0.2)\n",
+      "Requirement already satisfied: frozenlist>=1.1.1 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (1.3.0)\n",
+      "Requirement already satisfied: charset-normalizer<3.0,>=2.0 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (2.0.10)\n",
+      "Requirement already satisfied: yarl<2.0,>=1.0 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (1.7.2)\n",
+      "Requirement already satisfied: attrs>=17.3.0 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (21.4.0)\n",
+      "Requirement already satisfied: aiosignal>=1.1.2 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (1.2.0)\n",
+      "Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (4.0.2)\n",
+      "Requirement already satisfied: pytz>=2020.1 in /opt/conda/lib/python3.8/site-packages (from pandas->datasets) (2021.1)\n",
+      "Requirement already satisfied: python-dateutil>=2.8.1 in /opt/conda/lib/python3.8/site-packages (from pandas->datasets) (2.8.2)\n",
+      "Requirement already satisfied: six>=1.5 in /opt/conda/lib/python3.8/site-packages (from python-dateutil>=2.8.1->pandas->datasets) (1.15.0)\n",
+      "Installing collected packages: datasets\n",
+      "  Attempting uninstall: datasets\n",
+      "    Found existing installation: datasets 1.18.2.dev0\n",
+      "    Uninstalling datasets-1.18.2.dev0:\n",
+      "\u001b[31mERROR: Could not install packages due to an OSError: [Errno 13] Permission denied: 'entry_points.txt'\n",
+      "Consider using the `--user` option or check the permissions.\n",
+      "\u001b[0m\n",
+      "\u001b[33mWARNING: You are using pip version 21.3.1; however, version 22.0.2 is available.\n",
+      "You should consider upgrading via the '/opt/conda/bin/python -m pip install --upgrade pip' command.\u001b[0m\n"
+     ]
+    }
+   ],
+   "source": [
+    "!pip install --upgrade datasets"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": 6,
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:416e43f69cc98a936f068cbc6b786eded59831d04ed15bbf501c4040a5a2b996
 size 1262956849
run_training.sh
CHANGED
@@ -4,7 +4,7 @@ python run_speech_recognition_ctc_bnb.py \
     --dataset_config_name="ja" \
     --output_dir="./" \
     --overwrite_output_dir \
-    --num_train_epochs="
+    --num_train_epochs="50" \
     --per_device_train_batch_size="48" \
     --per_device_eval_batch_size="8" \
     --learning_rate="7.5e-5" \
|
special_tokens_map.json
CHANGED
@@ -1 +1 @@
-{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
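The change above appends a second copy of the <s> and </s> entries to additional_special_tokens; duplication like this typically appears when the same special tokens are re-added before each save (an assumption here, not something the diff itself records). A minimal sketch of deduplicating the map by token content, assuming the file sits in the working directory:

# Illustrative sketch: drop duplicate entries in additional_special_tokens,
# keeping the first occurrence of each token string.
import json

with open("special_tokens_map.json") as f:
    token_map = json.load(f)

seen = set()
token_map["additional_special_tokens"] = [
    tok for tok in token_map["additional_special_tokens"]
    if tok["content"] not in seen and not seen.add(tok["content"])
]

with open("special_tokens_map.json", "w") as f:
    json.dump(token_map, f)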
speech_training_notebook.ipynb
CHANGED
@@ -360,6 +360,65 @@
     "common_voice_train[1]"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Requirement already satisfied: datasets in /opt/conda/lib/python3.8/site-packages (1.18.2.dev0)\n",
+      "Collecting datasets\n",
+      "  Downloading datasets-1.18.3-py3-none-any.whl (311 kB)\n",
+      "     |████████████████████████████████| 311 kB 11.0 MB/s \n",
+      "\u001b[?25hRequirement already satisfied: aiohttp in /opt/conda/lib/python3.8/site-packages (from datasets) (3.8.1)\n",
+      "Requirement already satisfied: huggingface-hub<1.0.0,>=0.1.0 in /opt/conda/lib/python3.8/site-packages (from datasets) (0.4.0)\n",
+      "Requirement already satisfied: dill in /opt/conda/lib/python3.8/site-packages (from datasets) (0.3.4)\n",
+      "Requirement already satisfied: xxhash in /opt/conda/lib/python3.8/site-packages (from datasets) (2.0.2)\n",
+      "Requirement already satisfied: multiprocess in /opt/conda/lib/python3.8/site-packages (from datasets) (0.70.12.2)\n",
+      "Requirement already satisfied: pandas in /opt/conda/lib/python3.8/site-packages (from datasets) (1.4.0)\n",
+      "Requirement already satisfied: pyarrow!=4.0.0,>=3.0.0 in /opt/conda/lib/python3.8/site-packages (from datasets) (6.0.1)\n",
+      "Requirement already satisfied: requests>=2.19.0 in /opt/conda/lib/python3.8/site-packages (from datasets) (2.24.0)\n",
+      "Requirement already satisfied: fsspec[http]>=2021.05.0 in /opt/conda/lib/python3.8/site-packages (from datasets) (2022.1.0)\n",
+      "Requirement already satisfied: numpy>=1.17 in /opt/conda/lib/python3.8/site-packages (from datasets) (1.19.2)\n",
+      "Requirement already satisfied: tqdm>=4.62.1 in /opt/conda/lib/python3.8/site-packages (from datasets) (4.62.3)\n",
+      "Requirement already satisfied: packaging in /opt/conda/lib/python3.8/site-packages (from datasets) (21.3)\n",
+      "Requirement already satisfied: filelock in /opt/conda/lib/python3.8/site-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (3.0.12)\n",
+      "Requirement already satisfied: pyyaml in /opt/conda/lib/python3.8/site-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (5.4.1)\n",
+      "Requirement already satisfied: typing-extensions>=3.7.4.3 in /opt/conda/lib/python3.8/site-packages (from huggingface-hub<1.0.0,>=0.1.0->datasets) (4.0.1)\n",
+      "Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.8/site-packages (from packaging->datasets) (3.0.7)\n",
+      "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /opt/conda/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (1.25.11)\n",
+      "Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (2020.12.5)\n",
+      "Requirement already satisfied: chardet<4,>=3.0.2 in /opt/conda/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (3.0.4)\n",
+      "Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.8/site-packages (from requests>=2.19.0->datasets) (2.10)\n",
+      "Requirement already satisfied: multidict<7.0,>=4.5 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (6.0.2)\n",
+      "Requirement already satisfied: frozenlist>=1.1.1 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (1.3.0)\n",
+      "Requirement already satisfied: charset-normalizer<3.0,>=2.0 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (2.0.10)\n",
+      "Requirement already satisfied: yarl<2.0,>=1.0 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (1.7.2)\n",
+      "Requirement already satisfied: attrs>=17.3.0 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (21.4.0)\n",
+      "Requirement already satisfied: aiosignal>=1.1.2 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (1.2.0)\n",
+      "Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /opt/conda/lib/python3.8/site-packages (from aiohttp->datasets) (4.0.2)\n",
+      "Requirement already satisfied: pytz>=2020.1 in /opt/conda/lib/python3.8/site-packages (from pandas->datasets) (2021.1)\n",
+      "Requirement already satisfied: python-dateutil>=2.8.1 in /opt/conda/lib/python3.8/site-packages (from pandas->datasets) (2.8.2)\n",
+      "Requirement already satisfied: six>=1.5 in /opt/conda/lib/python3.8/site-packages (from python-dateutil>=2.8.1->pandas->datasets) (1.15.0)\n",
+      "Installing collected packages: datasets\n",
+      "  Attempting uninstall: datasets\n",
+      "    Found existing installation: datasets 1.18.2.dev0\n",
+      "    Uninstalling datasets-1.18.2.dev0:\n",
+      "\u001b[31mERROR: Could not install packages due to an OSError: [Errno 13] Permission denied: 'entry_points.txt'\n",
+      "Consider using the `--user` option or check the permissions.\n",
+      "\u001b[0m\n",
+      "\u001b[33mWARNING: You are using pip version 21.3.1; however, version 22.0.2 is available.\n",
+      "You should consider upgrading via the '/opt/conda/bin/python -m pip install --upgrade pip' command.\u001b[0m\n"
+     ]
+    }
+   ],
+   "source": [
+    "!pip install --upgrade datasets"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": 6,
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:093731fe91be156fc7e4a872c63f46f6ccef7059d892048324989ebf1c39c91e
 size 2991