diff --git "a/train.log" "b/train.log" --- "a/train.log" +++ "b/train.log" @@ -1,9 +1,9 @@ -2024-05-15 14:29:25.440457: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered -2024-05-15 14:29:25.440508: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered -2024-05-15 14:29:25.442473: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered -2024-05-15 14:29:26.567248: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT -05/15/2024 14:29:28 - WARNING - __main__ - Process rank: 0, device: cuda:0, n_gpu: 1distributed training: True, 16-bits training: False -05/15/2024 14:29:28 - INFO - __main__ - Training/evaluation parameters TrainingArguments( +2024-05-15 14:31:11.490571: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered +2024-05-15 14:31:11.490622: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered +2024-05-15 14:31:11.492868: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered +2024-05-15 14:31:12.620788: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT +05/15/2024 14:31:14 - WARNING - __main__ - Process rank: 0, device: cuda:0, n_gpu: 1distributed training: True, 16-bits training: False +05/15/2024 14:31:14 - INFO - __main__ - Training/evaluation parameters TrainingArguments( _n_gpu=1, accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'gradient_accumulation_kwargs': None}, adafactor=False, @@ -30,7 +30,7 @@ disable_tqdm=False, dispatch_batches=None, do_eval=True, do_predict=True, -do_train=False, +do_train=True, eval_accumulation_steps=None, eval_delay=0, eval_do_concat_batches=True, @@ -130,17 +130,17 @@ weight_decay=0.0, You can avoid this message in future by passing the argument `trust_remote_code=True`. Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. 
warnings.warn( - Downloading builder script: 0%| | 0.00/3.54k [00:00> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/config.json -[INFO|configuration_utils.py:789] 2024-05-15 14:29:40,289 >> Model config RobertaConfig { +[INFO|configuration_utils.py:726] 2024-05-15 14:31:25,038 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/config.json +[INFO|configuration_utils.py:789] 2024-05-15 14:31:25,042 >> Model config RobertaConfig { "_name_or_path": "PlanTL-GOB-ES/bsc-bio-ehr-es", "architectures": [ "RobertaForMaskedLM" @@ -179,8 +179,8 @@ Passing `trust_remote_code=True` will be mandatory to load this dataset from the "vocab_size": 50262 } -[INFO|configuration_utils.py:726] 2024-05-15 14:29:40,381 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/config.json -[INFO|configuration_utils.py:789] 2024-05-15 14:29:40,382 >> Model config RobertaConfig { +[INFO|configuration_utils.py:726] 2024-05-15 14:31:25,248 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/config.json +[INFO|configuration_utils.py:789] 2024-05-15 14:31:25,249 >> Model config RobertaConfig { "_name_or_path": "PlanTL-GOB-ES/bsc-bio-ehr-es", "architectures": [ "RobertaForMaskedLM" @@ -208,14 +208,14 @@ Passing `trust_remote_code=True` will be mandatory to load this dataset from the "vocab_size": 50262 } -[INFO|tokenization_utils_base.py:2087] 2024-05-15 14:29:40,392 >> loading file vocab.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/vocab.json -[INFO|tokenization_utils_base.py:2087] 2024-05-15 14:29:40,392 >> loading file merges.txt from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/merges.txt -[INFO|tokenization_utils_base.py:2087] 2024-05-15 14:29:40,392 >> loading file tokenizer.json from cache at None -[INFO|tokenization_utils_base.py:2087] 2024-05-15 14:29:40,392 >> loading file added_tokens.json from cache at None -[INFO|tokenization_utils_base.py:2087] 2024-05-15 14:29:40,392 >> loading file special_tokens_map.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/special_tokens_map.json -[INFO|tokenization_utils_base.py:2087] 2024-05-15 14:29:40,392 >> loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/tokenizer_config.json -[INFO|configuration_utils.py:726] 2024-05-15 14:29:40,392 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/config.json -[INFO|configuration_utils.py:789] 2024-05-15 14:29:40,393 >> Model config RobertaConfig { +[INFO|tokenization_utils_base.py:2087] 2024-05-15 14:31:25,259 >> loading file vocab.json from cache at 
/root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/vocab.json +[INFO|tokenization_utils_base.py:2087] 2024-05-15 14:31:25,259 >> loading file merges.txt from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/merges.txt +[INFO|tokenization_utils_base.py:2087] 2024-05-15 14:31:25,259 >> loading file tokenizer.json from cache at None +[INFO|tokenization_utils_base.py:2087] 2024-05-15 14:31:25,259 >> loading file added_tokens.json from cache at None +[INFO|tokenization_utils_base.py:2087] 2024-05-15 14:31:25,259 >> loading file special_tokens_map.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/special_tokens_map.json +[INFO|tokenization_utils_base.py:2087] 2024-05-15 14:31:25,259 >> loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/tokenizer_config.json +[INFO|configuration_utils.py:726] 2024-05-15 14:31:25,259 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/config.json +[INFO|configuration_utils.py:789] 2024-05-15 14:31:25,260 >> Model config RobertaConfig { "_name_or_path": "PlanTL-GOB-ES/bsc-bio-ehr-es", "architectures": [ "RobertaForMaskedLM" @@ -243,8 +243,8 @@ Passing `trust_remote_code=True` will be mandatory to load this dataset from the "vocab_size": 50262 } -[INFO|configuration_utils.py:726] 2024-05-15 14:29:40,477 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/config.json -[INFO|configuration_utils.py:789] 2024-05-15 14:29:40,478 >> Model config RobertaConfig { +[INFO|configuration_utils.py:726] 2024-05-15 14:31:25,341 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/config.json +[INFO|configuration_utils.py:789] 2024-05-15 14:31:25,341 >> Model config RobertaConfig { "_name_or_path": "PlanTL-GOB-ES/bsc-bio-ehr-es", "architectures": [ "RobertaForMaskedLM" @@ -272,72 +272,155 @@ Passing `trust_remote_code=True` will be mandatory to load this dataset from the "vocab_size": 50262 } -[INFO|modeling_utils.py:3429] 2024-05-15 14:29:40,721 >> loading weights file pytorch_model.bin from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/pytorch_model.bin -[INFO|modeling_utils.py:4160] 2024-05-15 14:29:40,847 >> Some weights of the model checkpoint at PlanTL-GOB-ES/bsc-bio-ehr-es were not used when initializing RobertaForTokenClassification: ['lm_head.bias', 'lm_head.decoder.bias', 'lm_head.decoder.weight', 'lm_head.dense.bias', 'lm_head.dense.weight', 'lm_head.layer_norm.bias', 'lm_head.layer_norm.weight'] +[INFO|modeling_utils.py:3429] 2024-05-15 14:31:25,586 >> loading weights file pytorch_model.bin from cache at /root/.cache/huggingface/hub/models--PlanTL-GOB-ES--bsc-bio-ehr-es/snapshots/1e543adb2d21f19d85a89305eebdbd64ab656b99/pytorch_model.bin +[INFO|modeling_utils.py:4160] 2024-05-15 14:31:25,711 >> Some weights of the model checkpoint at PlanTL-GOB-ES/bsc-bio-ehr-es were not used when 
initializing RobertaForTokenClassification: ['lm_head.bias', 'lm_head.decoder.bias', 'lm_head.decoder.weight', 'lm_head.dense.bias', 'lm_head.dense.weight', 'lm_head.layer_norm.bias', 'lm_head.layer_norm.weight'] - This IS expected if you are initializing RobertaForTokenClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing RobertaForTokenClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). -[WARNING|modeling_utils.py:4172] 2024-05-15 14:29:40,847 >> Some weights of RobertaForTokenClassification were not initialized from the model checkpoint at PlanTL-GOB-ES/bsc-bio-ehr-es and are newly initialized: ['classifier.bias', 'classifier.weight'] +[WARNING|modeling_utils.py:4172] 2024-05-15 14:31:25,711 >> Some weights of RobertaForTokenClassification were not initialized from the model checkpoint at PlanTL-GOB-ES/bsc-bio-ehr-es and are newly initialized: ['classifier.bias', 'classifier.weight'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. - Map: 0%| | 0/6807 [00:00> The following columns in the evaluation set don't have a corresponding argument in `RobertaForTokenClassification.forward` and have been ignored: id, ner_tags, tokens. If id, ner_tags, tokens are not expected by `RobertaForTokenClassification.forward`, you can safely ignore this message. -[INFO|trainer.py:3614] 2024-05-15 14:29:44,607 >> ***** Running Evaluation ***** -[INFO|trainer.py:3616] 2024-05-15 14:29:44,608 >> Num examples = 6807 -[INFO|trainer.py:3619] 2024-05-15 14:29:44,608 >> Batch size = 8 - 0%| | 0/851 [00:00> The following columns in the test set don't have a corresponding argument in `RobertaForTokenClassification.forward` and have been ignored: id, ner_tags, tokens. If id, ner_tags, tokens are not expected by `RobertaForTokenClassification.forward`, you can safely ignore this message. 
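
The `[INFO|modeling_utils.py:4160]` / `[WARNING|modeling_utils.py:4172]` messages above are the expected consequence of loading the PlanTL-GOB-ES/bsc-bio-ehr-es masked-LM checkpoint into `RobertaForTokenClassification`: the `lm_head.*` tensors are discarded and a fresh `classifier` head is created, which is why the log advises fine-tuning before relying on predictions. A minimal sketch of that loading step, with a hypothetical label list (the real labels come from the Rodrigo1771/drugtemist-ner dataset loaded by the script):

```python
from transformers import AutoModelForTokenClassification, AutoTokenizer

# Hypothetical label set, purely for illustration; the training script derives
# the actual labels from the Rodrigo1771/drugtemist-ner dataset features.
labels = ["O", "B-DRUG", "I-DRUG"]

checkpoint = "PlanTL-GOB-ES/bsc-bio-ehr-es"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

# The checkpoint was trained for masked LM, so its lm_head.* weights are
# ignored here and classifier.weight / classifier.bias are newly initialized,
# exactly as the [INFO]/[WARNING] messages above report.
model = AutoModelForTokenClassification.from_pretrained(
    checkpoint,
    num_labels=len(labels),
    id2label=dict(enumerate(labels)),
    label2id={label: i for i, label in enumerate(labels)},
)
```
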
-[INFO|trainer.py:3614] 2024-05-15 14:30:01,184 >> ***** Running Prediction ***** -[INFO|trainer.py:3616] 2024-05-15 14:30:01,184 >> Num examples = 6807 -[INFO|trainer.py:3619] 2024-05-15 14:30:01,184 >> Batch size = 8 - 0%| | 0/851 [00:00> Saving model checkpoint to /content/dissertation/scripts/ner/output -[INFO|configuration_utils.py:471] 2024-05-15 14:30:17,256 >> Configuration saved in /content/dissertation/scripts/ner/output/config.json -[INFO|modeling_utils.py:2590] 2024-05-15 14:30:18,218 >> Model weights saved in /content/dissertation/scripts/ner/output/model.safetensors -[INFO|tokenization_utils_base.py:2488] 2024-05-15 14:30:18,219 >> tokenizer config file saved in /content/dissertation/scripts/ner/output/tokenizer_config.json -[INFO|tokenization_utils_base.py:2497] 2024-05-15 14:30:18,219 >> Special tokens file saved in /content/dissertation/scripts/ner/output/special_tokens_map.json -[INFO|modelcard.py:450] 2024-05-15 14:30:18,357 >> Dropping the following result as it does not have all the necessary fields: -{'task': {'name': 'Token Classification', 'type': 'token-classification'}, 'dataset': {'name': 'Rodrigo1771/drugtemist-ner', 'type': 'Rodrigo1771/drugtemist-ner', 'config': 'DrugTEMIST NER', 'split': 'validation', 'args': 'DrugTEMIST NER'}} -***** predict metrics ***** - predict_accuracy = 0.0288 - predict_f1 = 0.0056 - predict_loss = 1.293 - predict_precision = 0.0028 - predict_recall = 0.2693 - predict_runtime = 0:00:15.76 - predict_samples_per_second = 431.659 - predict_steps_per_second = 53.965 - model.safetensors: 0%| | 0.00/496M [00:00> The following columns in the training set don't have a corresponding argument in `RobertaForTokenClassification.forward` and have been ignored: ner_tags, tokens, id. If ner_tags, tokens, id are not expected by `RobertaForTokenClassification.forward`, you can safely ignore this message. +[INFO|trainer.py:2048] 2024-05-15 14:31:30,117 >> ***** Running training ***** +[INFO|trainer.py:2049] 2024-05-15 14:31:30,118 >> Num examples = 27,224 +[INFO|trainer.py:2050] 2024-05-15 14:31:30,118 >> Num Epochs = 10 +[INFO|trainer.py:2051] 2024-05-15 14:31:30,118 >> Instantaneous batch size per device = 4 +[INFO|trainer.py:2054] 2024-05-15 14:31:30,118 >> Total train batch size (w. parallel, distributed & accumulation) = 16 +[INFO|trainer.py:2055] 2024-05-15 14:31:30,118 >> Gradient Accumulation steps = 4 +[INFO|trainer.py:2056] 2024-05-15 14:31:30,118 >> Total optimization steps = 17,010 +[INFO|trainer.py:2057] 2024-05-15 14:31:30,118 >> Number of trainable parameters = 124,055,043 + 0%| | 0/17010 [00:00> The following columns in the evaluation set don't have a corresponding argument in `RobertaForTokenClassification.forward` and have been ignored: ner_tags, tokens, id. If ner_tags, tokens, id are not expected by `RobertaForTokenClassification.forward`, you can safely ignore this message. 
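
Before the evaluation block below, a quick sanity check of the figures in the "***** Running training *****" section above (assuming the single cuda:0 device reported at the start of the log): 27,224 examples at a per-device batch size of 4 give 6,806 batches per epoch; with 4 gradient-accumulation steps that is 1,701 optimizer updates per epoch and an effective batch size of 16, so 10 epochs yield the reported 17,010 total optimization steps and place the first epoch checkpoint at step 1701.

```python
import math

# Figures reported by the Trainer in the "***** Running training *****" block.
num_train_examples = 27_224
per_device_batch = 4
grad_accum_steps = 4
num_epochs = 10

batches_per_epoch = math.ceil(num_train_examples / per_device_batch)  # 6806 forward passes per epoch
updates_per_epoch = batches_per_epoch // grad_accum_steps             # 1701 -> checkpoint-1701
total_optimization_steps = updates_per_epoch * num_epochs             # 17010, matching the log
effective_batch_size = per_device_batch * grad_accum_steps            # 16 (single GPU)

print(updates_per_epoch, total_optimization_steps, effective_batch_size)
```
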
+[INFO|trainer.py:3614] 2024-05-15 14:36:45,310 >> ***** Running Evaluation ***** +[INFO|trainer.py:3616] 2024-05-15 14:36:45,311 >> Num examples = 6807 +[INFO|trainer.py:3619] 2024-05-15 14:36:45,311 >> Batch size = 8 +{'loss': 0.0278, 'grad_norm': 0.008281867019832134, 'learning_rate': 4.853027630805409e-05, 'epoch': 0.29} +{'loss': 0.0082, 'grad_norm': 0.7454735040664673, 'learning_rate': 4.7060552616108174e-05, 'epoch': 0.59} +{'loss': 0.0055, 'grad_norm': 0.028717944398522377, 'learning_rate': 4.559082892416226e-05, 'epoch': 0.88} - Upload 3 LFS files: 0%| | 0/3 [00:00> Saving model checkpoint to /content/dissertation/scripts/ner/output/checkpoint-1701 +[INFO|configuration_utils.py:471] 2024-05-15 14:37:01,008 >> Configuration saved in /content/dissertation/scripts/ner/output/checkpoint-1701/config.json +[INFO|modeling_utils.py:2590] 2024-05-15 14:37:01,983 >> Model weights saved in /content/dissertation/scripts/ner/output/checkpoint-1701/model.safetensors +[INFO|tokenization_utils_base.py:2488] 2024-05-15 14:37:01,984 >> tokenizer config file saved in /content/dissertation/scripts/ner/output/checkpoint-1701/tokenizer_config.json +[INFO|tokenization_utils_base.py:2497] 2024-05-15 14:37:01,984 >> Special tokens file saved in /content/dissertation/scripts/ner/output/checkpoint-1701/special_tokens_map.json +[INFO|tokenization_utils_base.py:2488] 2024-05-15 14:37:06,395 >> tokenizer config file saved in /content/dissertation/scripts/ner/output/tokenizer_config.json +[INFO|tokenization_utils_base.py:2497] 2024-05-15 14:37:06,395 >> Special tokens file saved in /content/dissertation/scripts/ner/output/special_tokens_map.json + 10%|█ | 1702/17010 [05:36<27:45:55, 6.53s/it] ... 10%|█ | 1782/17010 [05:51<47:53, 5.30it/s] (per-step tqdm updates for steps 1703-1781 condensed; throughput recovers from the checkpoint-save pause to a steady ~5.2-5.3 it/s) \ No newline at end of file
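
The diff ends with the new run just past its first saved checkpoint (/content/dissertation/scripts/ner/output/checkpoint-1701). Should the Colab session be interrupted at this point, a `Trainer`-based run like this one can generally be resumed from that directory; the snippet below is only a hypothetical illustration (the training script itself would drive this through its own command-line arguments):

```python
from transformers import Trainer

def resume_training(trainer: Trainer) -> None:
    # Restores model weights, optimizer, LR scheduler and RNG state from the
    # checkpoint directory saved above, then continues toward step 17010.
    trainer.train(
        resume_from_checkpoint="/content/dissertation/scripts/ner/output/checkpoint-1701"
    )
```
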