ZoeMC committed on
Commit fd41599 · 1 Parent(s): ec4d372

Saving weights and logs of step 10000

.gitattributes CHANGED
@@ -26,3 +26,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ chemT5_data.csv filter=lfs diff=lfs merge=lfs -text
+ chemT5_data.tsv filter=lfs diff=lfs merge=lfs -text
__pycache__/pretokenizer.cpython-39.pyc ADDED
Binary file (1.08 kB)
 
__pycache__/t5_tokenizer_model.cpython-39.pyc ADDED
Binary file (5.02 kB)
 
chemT5_data.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:790657db4eff6c29407874fc4eb06ecfa134b91f924a44c215a0bf8b556ad307
+ size 48054222
chemT5_data.tsv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:790657db4eff6c29407874fc4eb06ecfa134b91f924a44c215a0bf8b556ad307
+ size 48054222
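
Both chemT5_data.csv and chemT5_data.tsv are committed as Git LFS pointer files rather than raw data; they carry the same oid and size, so they reference identical ~48 MB content. A minimal sketch (not part of the commit) for verifying a fetched file against the pointer's oid, assuming `git lfs pull` has already materialized the real file:

import hashlib

# Hypothetical check: the SHA-256 of the fetched file should equal the pointer's oid.
EXPECTED_OID = "790657db4eff6c29407874fc4eb06ecfa134b91f924a44c215a0bf8b556ad307"
h = hashlib.sha256()
with open("chemT5_data.csv", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest() == EXPECTED_OID)
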
config.json CHANGED
@@ -23,5 +23,5 @@
  "tie_word_embeddings": false,
  "transformers_version": "4.11.3",
  "use_cache": true,
- "vocab_size": 32003
+ "vocab_size": 32103
  }
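
The vocab_size bump from 32003 to 32103 appears to account for the 100 <extra_id_*> sentinel tokens registered in tokenizer_config.json, on top of the 32,000 learned SentencePiece pieces and the three special tokens. A quick consistency check, a sketch assuming the repo files are available locally:

from transformers import AutoTokenizer, T5Config

# Sketch: every tokenizer id must fit inside the model's embedding matrix.
tokenizer = AutoTokenizer.from_pretrained("./")  # reads tokenizer.json / tokenizer_config.json
config = T5Config.from_pretrained("./")          # reads config.json
print(len(tokenizer), config.vocab_size)
assert config.vocab_size >= len(tokenizer)
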
dataset-clean.py ADDED
@@ -0,0 +1,25 @@
+ import numpy as np
+ import pandas as pd
+ import re
+ from t5_tokenizer_model import SentencePieceUnigramTokenizer
+ #from pretokenizer import atomwise_tokenizer
+ from tqdm import tqdm
+
+
+
+ vocab_size = 32_000
+ input_sentence_size = None
+
+ # Initialize a dataset
+ #dataset = load_dataset('csv', data_files='/home/zoez/Chem-T5/train-file.csv',split="train")
+ dataset = pd.read_csv('./chemT5_data.csv')#('/home/zoez/Chem-T5/train-file.csv')
+ #print(dataset.iloc[0])
+
+ dataset=pd.DataFrame(columns=['SMILES'],data=dataset)
+ #dataset.drop('Unnamed: 0',1)
+ #print(dataset.columns)
+ dataset.columns=['SMILES']
+
+
+ dataset.fillna('', inplace=True)
+ dataset.to_csv('chemT5_data.csv',sep = ' ')
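
dataset-clean.py rewrites chemT5_data.csv in place: it keeps a single SMILES column, fills missing entries with empty strings, and writes the result back with a space separator, which is the file later passed to run_t5_mlm_flax.py via --train_file. A small read-back sanity check, a sketch that is not part of the commit:

import pandas as pd

# Sketch: read the cleaned file back the way it was written (space separator, index in column 0).
df = pd.read_csv("chemT5_data.csv", sep=" ", index_col=0)
print(df.shape)
print(df["SMILES"].head())
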
events.out.tfevents.1650698399.toxicgpu.cs.vt.edu.18283.0.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a3ecf146d2472a755a7baf126d63da3ded9b5bee6b34b42c6cf072fa341c006
+ size 40
events.out.tfevents.1650698554.toxicgpu.cs.vt.edu.18776.0.v2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9680a4a8dc371b6e7c350cb76781f4c373c86ddefcd589b17f70eb3b2a745752
+ size 1450993
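
The first tfevents file is only 40 bytes (an empty or aborted run), while the second holds the actual TensorBoard logs for the 10,000 training steps. A sketch for inspecting them offline, assuming the tensorboard package is installed (treat the EventAccumulator import path as an assumption):

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Sketch: load the event file and list the logged tags (e.g. training loss curves).
ea = EventAccumulator("events.out.tfevents.1650698554.toxicgpu.cs.vt.edu.18776.0.v2")
ea.Reload()
print(ea.Tags())
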
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d99ed12fc3df890828fc608bde1949bb19fce1d45e4117685d366f0b31787a9
+ size 990170015
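
flax_model.msgpack is the Flax weight checkpoint saved at step 10000 (~990 MB, stored via LFS). A minimal loading sketch, assuming transformers with Flax support is installed and the LFS files have been pulled:

from transformers import FlaxT5ForConditionalGeneration

# Sketch: load the step-10000 checkpoint from this repo's working directory.
model = FlaxT5ForConditionalGeneration.from_pretrained("./")
print(model.config.vocab_size)  # expected to match config.json: 32103
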
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "special_tokens_map_file": null, "name_or_path": "./", "tokenizer_class": "T5Tokenizer"}
train_scprit.sh CHANGED
@@ -6,8 +6,8 @@ python run_t5_mlm_flax.py \
  --tokenizer_name="./" \
  --train_file="chemT5_data.csv" \
  --max_seq_length="256" \
- --per_device_train_batch_size="16" \
- --per_device_eval_batch_size="16" \
+ --per_device_train_batch_size="8" \
+ --per_device_eval_batch_size="8" \
  --adafactor \
  --learning_rate="0.005" \
  --weight_decay="0.001" \
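
The commit halves the per-device batch size from 16 to 8, presumably to fit training into device memory; with run_t5_mlm_flax.py the global batch per step is the per-device value times the number of JAX devices. A sketch of that arithmetic, assuming JAX is installed:

import jax

# Sketch: effective global batch size after the change in train_scprit.sh.
per_device_train_batch_size = 8
print(per_device_train_batch_size * jax.device_count())
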