Muthukumaran committed on
Commit d7d2441
1 Parent(s): e0a40ff

Initial Upload

1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "word_embedding_dimension": 768,
+   "pooling_mode_cls_token": false,
+   "pooling_mode_mean_tokens": true,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false
+ }
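
This pooling config enables mean pooling only (CLS, max, and sqrt-length pooling are all off), so a sentence embedding is the attention-mask-weighted average of the encoder's token embeddings. A minimal PyTorch sketch of what the sentence-transformers Pooling module computes under these settings (the function name and shapes are illustrative, not from this repo):

import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # token_embeddings: (batch, seq_len, 768); attention_mask: (batch, seq_len)
    mask = attention_mask.unsqueeze(-1).type_as(token_embeddings)  # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)  # sum over non-padding tokens only
    counts = mask.sum(dim=1).clamp(min=1e-9)       # token counts, guarded against divide-by-zero
    return summed / counts                         # (batch, 768) mean-pooled embeddings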
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 1.0,
+   "train_loss": 1.4607645520019532,
+   "train_runtime": 81957.8858,
+   "train_samples": 0,
+   "train_samples_per_second": 1220.139,
+   "train_steps_per_second": 1.22
+ }
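
As a sanity check, these numbers are internally consistent: 1.22 steps/s over the 81,957.9 s runtime is roughly 100,000 optimizer steps, and 1,220.139 samples/s divided by 1.22 steps/s is 1,000 samples per step, i.e. the per-device batch of 500 across 2 GPUs used in the launch command in log.txt below. The "train_samples": 0 entry looks like a reporting quirk of the custom dataset rather than a real count.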
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "/dccstor/aashka1/sentence_embeddings/output/NASA_finetuning_v2/NASA-WatBERT-300M-NASA-BioMedical/2GPU/FinetuneNASA_pooling_mean_qlen64_plen256_freeze_embedding_true_lr_2e-5_bsz_500_300000steps/checkpoint-300000",
+   "architectures": [
+     "RobertaModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float16",
+   "transformers_version": "4.28.1",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
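
The backbone is a RoBERTa-base-shaped encoder (12 layers, 12 heads, hidden size 768, vocab 50,265) saved in float16. A minimal sketch of loading it directly with transformers; the local path is a placeholder for wherever this repository is cloned:

from transformers import AutoModel, AutoTokenizer

model_dir = "./nasa-model"  # placeholder: local clone of this repository
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModel.from_pretrained(model_dir)  # instantiates RobertaModel per config.json

inputs = tokenizer("Sea surface temperature anomalies in the Pacific.", return_tensors="pt")
hidden = model(**inputs).last_hidden_state  # (1, seq_len, 768) token embeddings
print(hidden.shape)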
log.txt ADDED
@@ -0,0 +1,3 @@
+ /opt/share/exec/jbsub8 -cores 8+2 -require a100_80gb && hname!=cccxc607 -queue nonstandard -proj embeddings-kd -name NASA-ft -mem 100G -out /dccstor/aashka1/sentence_embeddings/output/NASA_finetuning_v2/Stagewise_stage1/NASA-WatBERT-300M-NASA-BioMedical-300K/2GPU/FinetuneNASA_pooling_mean_qlen64_plen256_freeze_embedding_true_lr_2e-5_bsz_500_100000steps/sent_embed.out -err /dccstor/aashka1/sentence_embeddings/output/NASA_finetuning_v2/Stagewise_stage1/NASA-WatBERT-300M-NASA-BioMedical-300K/2GPU/FinetuneNASA_pooling_mean_qlen64_plen256_freeze_embedding_true_lr_2e-5_bsz_500_100000steps/sent_embed.err PYTHONPATH=/dccstor/aashka1/onekd deepspeed --master_port 29519 /dccstor/aashka1/onekd/onekd/distillation/text_embeddings/simlm/distil_similarity_scores/train_simlm.py --deepspeed /dccstor/aashka1/onekd/onekd/distillation/text_embeddings/simlm/ds_config.json --model_name_or_path /dccstor/aashka1/sentence_embeddings/output/NASA_finetuning_v2/NASA-WatBERT-300M-NASA-BioMedical/2GPU/FinetuneNASA_pooling_mean_qlen64_plen256_freeze_embedding_true_lr_2e-5_bsz_500_300000steps/checkpoint-300000 --freeze_pos_emb true --pooling_source mean --per_device_train_batch_size 500 --per_device_eval_batch_size 32 --add_pooler False --t 0.02 --seed 42 --do_train --data_dir /dccstor/aashka1/sentence_embeddings/data/sent_embed_data/ --train_data_config //dccstor/phalanx/aashka/sentence-embeddings-NASA/configs/data_config_20M_NASA_stage1_nogooaq_nohotpot_nofever.json --validation_file /dccstor/retrieve-rerank2/data/embeddings-training-data/simlm/dev_w_docs.jsonl --fp16 --q_max_len 64 --p_max_len 256 --train_n_passages 1 --dataloader_num_workers 2 --num_train_epochs 1 --learning_rate 2e-5 --use_scaled_loss True --warmup_steps 1000 --share_encoder True --logging_steps 1000 --output_dir /dccstor/aashka1/sentence_embeddings/output/NASA_finetuning_v2/Stagewise_stage1/NASA-WatBERT-300M-NASA-BioMedical-300K/2GPU/FinetuneNASA_pooling_mean_qlen64_plen256_freeze_embedding_true_lr_2e-5_bsz_500_100000steps --save_total_limit 3 --save_strategy steps --save_steps 100000 --remove_unused_columns False --overwrite_output_dir --disable_tqdm False --max_steps 100000 --report_to none
+ # bsub -q nonstandard -g /aashka/_/embeddings-kd -J NASA-ft -M 102400 -hl -n 8 -R "select[a100_80gb && hname!=cccxc607] rusage[mem=112640] span[ptile=8] affinity[core(1)]" -oo /dccstor/aashka1/sentence_embeddings/output/NASA_finetuning_v2/Stagewise_stage1/NASA-WatBERT-300M-NASA-BioMedical-300K/2GPU/FinetuneNASA_pooling_mean_qlen64_plen256_freeze_embedding_true_lr_2e-5_bsz_500_100000steps/sent_embed.out -eo /dccstor/aashka1/sentence_embeddings/output/NASA_finetuning_v2/Stagewise_stage1/NASA-WatBERT-300M-NASA-BioMedical-300K/2GPU/FinetuneNASA_pooling_mean_qlen64_plen256_freeze_embedding_true_lr_2e-5_bsz_500_100000steps/sent_embed.err -gpu num=2:mode=exclusive_process PYTHONPATH=/dccstor/aashka1/onekd deepspeed --master_port 29519 /dccstor/aashka1/onekd/onekd/distillation/text_embeddings/simlm/distil_similarity_scores/train_simlm.py --deepspeed /dccstor/aashka1/onekd/onekd/distillation/text_embeddings/simlm/ds_config.json --model_name_or_path /dccstor/aashka1/sentence_embeddings/output/NASA_finetuning_v2/NASA-WatBERT-300M-NASA-BioMedical/2GPU/FinetuneNASA_pooling_mean_qlen64_plen256_freeze_embedding_true_lr_2e-5_bsz_500_300000steps/checkpoint-300000 --freeze_pos_emb true --pooling_source mean --per_device_train_batch_size 500 --per_device_eval_batch_size 32 --add_pooler False --t 0.02 --seed 42 --do_train --data_dir /dccstor/aashka1/sentence_embeddings/data/sent_embed_data/ --train_data_config //dccstor/phalanx/aashka/sentence-embeddings-NASA/configs/data_config_20M_NASA_stage1_nogooaq_nohotpot_nofever.json --validation_file /dccstor/retrieve-rerank2/data/embeddings-training-data/simlm/dev_w_docs.jsonl --fp16 --q_max_len 64 --p_max_len 256 --train_n_passages 1 --dataloader_num_workers 2 --num_train_epochs 1 --learning_rate 2e-5 --use_scaled_loss True --warmup_steps 1000 --share_encoder True --logging_steps 1000 --output_dir /dccstor/aashka1/sentence_embeddings/output/NASA_finetuning_v2/Stagewise_stage1/NASA-WatBERT-300M-NASA-BioMedical-300K/2GPU/FinetuneNASA_pooling_mean_qlen64_plen256_freeze_embedding_true_lr_2e-5_bsz_500_100000steps --save_total_limit 3 --save_strategy steps --save_steps 100000 --remove_unused_columns False --overwrite_output_dir --disable_tqdm False --max_steps 100000 --report_to none
+ Job <722256> is submitted to queue <nonstandard>.
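
In short, log.txt records the cluster submission (a jbsub8 wrapper around LSF bsub, job 722256) for a 2-GPU DeepSpeed fine-tuning run: SimLM-style training starting from the 300,000-step checkpoint, mean pooling, query/passage lengths of 64/256 tokens, frozen position embeddings, batch size 500 per device, learning rate 2e-5 with 1,000 warmup steps, fp16, and 100,000 total steps.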
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
modules.json ADDED
@@ -0,0 +1,20 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   },
+   {
+     "idx": 2,
+     "name": "2",
+     "path": "2_Normalize",
+     "type": "sentence_transformers.models.Normalize"
+   }
+ ]
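
modules.json wires up the three-stage sentence-transformers pipeline: the Transformer encoder, then the mean Pooling module from 1_Pooling, then L2 normalization. A minimal usage sketch (the load path is a placeholder for a local clone of this repository):

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("./nasa-model")  # placeholder path
embeddings = model.encode(["solar wind", "coronal mass ejection"])
print(embeddings.shape)  # (2, 768)

Because the final Normalize module makes each embedding unit-length, a plain dot product between two embeddings is already their cosine similarity.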
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0143a266f7ab79c9490c39331c7c39f6ca4c1e4f9519bfd738de18e53e64722d
+ size 249322753
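
This Git LFS pointer's size is consistent with the config above: roughly 125M parameters for a 12-layer, 768-hidden RoBERTa with a 50,265-token vocabulary comes to about 249 MB at 2 bytes per parameter in float16.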
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 512,
+   "do_lower_case": false
+ }
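
The 512-token max_seq_length is the sentence-transformers truncation limit; it fits under the 514 max_position_embeddings in config.json because RoBERTa reserves two position slots for its padding-index offset.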
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "mask_token": "<mask>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "RobertaTokenizer",
+   "trim_offsets": true,
+   "unk_token": "<unk>"
+ }
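
The oddly huge model_max_length is transformers' VERY_LARGE_INTEGER sentinel, int(1e30) = 1000000000000000019884624838656, written when the tokenizer has no inherent length limit; the effective cap comes from the max_seq_length of 512 in sentence_bert_config.json.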
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 1.0,
+   "train_loss": 1.4607645520019532,
+   "train_runtime": 81957.8858,
+   "train_samples": 0,
+   "train_samples_per_second": 1220.139,
+   "train_steps_per_second": 1.22
+ }
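
train_results.json duplicates the metrics in all_results.json; the Hugging Face Trainer writes both at the end of a run, with all_results.json aggregating metrics across all evaluated splits (here, training only).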
vocab.json ADDED
The diff for this file is too large to render. See raw diff