abhishek (HF staff) committed
Commit 5abe0b9
1 Parent(s): 13a42b0

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,17 @@
+
+ ---
+ tags:
+ - autotrain
+ - text-classification
+ widget:
+ - text: "I love AutoTrain"
+ datasets:
+ - imdb
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Text Classification
+
+ ## Validation Metrics
+ No validation metrics available
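
The model card above does not include a usage snippet, so here is a minimal inference sketch (not part of the commit) that loads this upload through the standard `transformers` pipeline API. The repo id `abhishek/txtclfimdb3` is taken from `training_params.json` below; a local clone path works as well.

```python
# Minimal inference sketch for the uploaded text-classification model.
# Assumes the repo id recorded in training_params.json in this commit.
from transformers import pipeline

classifier = pipeline("text-classification", model="abhishek/txtclfimdb3")
print(classifier("I love AutoTrain"))
# Note: labels come back as the raw ids defined in id2label of config.json
# (0 / 1), not as human-readable class names.
```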
checkpoint-196/config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "bert-base-uncased",
+   "_num_labels": 2,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": 0,
+     "1": 1
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "0": 0,
+     "1": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.32.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
checkpoint-196/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90f46262e71ae76993e39f989deebccb1103d5415bae84ee1934ee959f4ab65e
+ size 876037893
checkpoint-196/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33fa8f363b1e7858ca21f9dd0463e2fcff5c21c0cb26ad85ec0eaa04cf987e0a
+ size 438003505
checkpoint-196/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca6eeef19e669536e711e28fb379181a971c420f202c31b18f2a7ca6701c0803
+ size 15607
checkpoint-196/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d436a07d301cf52c9f1e80284074a82b54ebb38a43cd991c298203ed0ec7691
+ size 15607
checkpoint-196/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:318cd6d635fce44872177e69a182b30dcec98234ea257198a9d6c22c6978eb87
+ size 627
checkpoint-196/trainer_state.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "global_step": 196,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.4,
+       "learning_rate": 2.0113636363636362e-05,
+       "loss": 0.473,
+       "step": 78
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 6.818181818181818e-06,
+       "loss": 0.313,
+       "step": 156
+     }
+   ],
+   "max_steps": 196,
+   "num_train_epochs": 1,
+   "total_flos": 1650232534237184.0,
+   "trial_name": null,
+   "trial_params": null
+ }
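
The two logged learning rates are consistent with the linear schedule implied by `training_params.json` below (`lr: 3e-05`, `scheduler: "linear"`, `warmup_ratio: 0.1`) over the 196 total steps. A small arithmetic sketch, under the assumption that the warmup length is rounded up to 20 steps as in the Hugging Face linear scheduler:

```python
# Sketch reproducing the logged learning rates from a linear warmup + linear
# decay schedule (assumption: warmup_steps = ceil(0.1 * 196) = 20).
import math

base_lr, max_steps = 3e-05, 196
warmup_steps = math.ceil(0.1 * max_steps)  # 20

def linear_lr(step: int) -> float:
    if step < warmup_steps:
        return base_lr * step / max(1, warmup_steps)
    return base_lr * max(0.0, (max_steps - step) / max(1, max_steps - warmup_steps))

print(linear_lr(78))   # ~2.0114e-05, matches the log entry at step 78
print(linear_lr(156))  # ~6.8182e-06, matches the log entry at step 156
```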
checkpoint-196/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:991938c4c86c3f9e226ac96c4d8053b9f297b5f36e1f77ef7ec8563011da4d05
+ size 3963
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "bert-base-uncased",
+   "_num_labels": 2,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": 0,
+     "1": 1
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "0": 0,
+     "1": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.32.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
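
For completeness, a short sketch (not in the commit) showing how this configuration looks once loaded with `transformers.AutoConfig`. Note that `id2label` maps to the raw ids 0 and 1 rather than dataset class names such as `neg`/`pos`:

```python
# Inspect the uploaded config; the repo id is taken from training_params.json.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("abhishek/txtclfimdb3")
print(config.model_type)  # expected: "bert"
print(config.num_labels)  # expected: 2
print(config.id2label)    # expected: {0: 0, 1: 1} -- ids, not class names
```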
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33fa8f363b1e7858ca21f9dd0463e2fcff5c21c0cb26ad85ec0eaa04cf987e0a
+ size 438003505
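
The weight files in this commit are stored as Git LFS pointers (spec v1), so the diff records only an object id and a size. A verification sketch using only the standard library; the local filename is a placeholder for the resolved (downloaded) file, not the pointer text:

```python
# Verify a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
import os

path = "pytorch_model.bin"  # placeholder: the resolved file, not the pointer
expected_oid = "33fa8f363b1e7858ca21f9dd0463e2fcff5c21c0cb26ad85ec0eaa04cf987e0a"
expected_size = 438003505

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("file matches the LFS pointer")
```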
runs/Aug11_10-32-12_beast/events.out.tfevents.1691742733.beast.1746407.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e0a81090d6a6ac29ba2edc627805d85fced658bbf1b93d97d36d787dc3d3bff
+ size 4135
runs/Aug11_10-33-06_beast/events.out.tfevents.1691742786.beast.1746608.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c623ecfb5684fc3378fd28df840135181b4331a7a91bdf6199f9f83625b3b06
+ size 4135
runs/Aug11_10-36-47_beast/events.out.tfevents.1691743009.beast.1747152.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09ff46130d623e1862bafbd62b684de3a968c2c7b03bf620f3ae929c8ea0ea76
+ size 4804
runs/Aug11_11-15-04_beast/events.out.tfevents.1691745305.beast.1751000.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d451ccb2a6ba36ef985be8c1d409390fc9576d82abc9847a2d3b9750c802ce8
+ size 4804
runs/Aug11_11-42-17_beast/events.out.tfevents.1691746939.beast.1753947.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d68f9b3ab08dee2b8d9a4dd4dca4d3d8b3d5a07e49566e8069f18be3d356abd
+ size 4136
runs/Aug11_11-46-02_beast/events.out.tfevents.1691747163.beast.1754528.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df4fd997666c1b14d2e2b467ae0a6bdeb7f1bda3acfb6e225c56039859de2209
+ size 88
runs/Aug11_11-46-34_beast/events.out.tfevents.1691747195.beast.1754668.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97833253f5fe42e6637e00cf0243d1c287aaf95787e644f8216bfe215ad836a3
+ size 88
runs/Aug11_11-49-18_beast/events.out.tfevents.1691747358.beast.1755130.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c379970ef4f7b8ecafe51f8836215c405c88f36224c01bab68f40a4aefc97472
+ size 4804
runs/Aug11_12-14-28_beast/events.out.tfevents.1691748869.beast.1757815.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e05e15035530b93d1b8dbe4e8f1a548a66715432b1b346bb23e38e943dab3ac
+ size 4802
runs/Aug11_12-16-04_beast/events.out.tfevents.1691748965.beast.1758195.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a768ef9c3925bc2783a64abdd535cb6a8cf45db7f1a77e093c71e4f1ff541ca4
+ size 4802
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:991938c4c86c3f9e226ac96c4d8053b9f297b5f36e1f77ef7ec8563011da4d05
+ size 3963
training_params.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "data_path": "imdb",
+   "model_name": "bert-base-uncased",
+   "lr": 3e-05,
+   "epochs": 1,
+   "max_seq_length": 128,
+   "batch_size": 64,
+   "warmup_ratio": 0.1,
+   "gradient_accumulation": 1,
+   "optimizer": "adamw_torch",
+   "scheduler": "linear",
+   "weight_decay": 0.0,
+   "max_grad_norm": 1.0,
+   "seed": 42,
+   "train_split": "train",
+   "valid_split": null,
+   "text_column": "text",
+   "target_column": "label",
+   "logging_steps": -1,
+   "project_name": "output",
+   "auto_find_batch_size": false,
+   "fp16": true,
+   "save_total_limit": 1,
+   "save_strategy": "epoch",
+   "token": null,
+   "push_to_hub": true,
+   "repo_id": "abhishek/txtclfimdb3"
+ }
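
Most of these AutoTrain parameters correspond closely to standard `transformers.TrainingArguments` options. The sketch below is a rough, hedged mapping inferred from the parameter names, not AutoTrain's internal code; data-side settings (`data_path`, `model_name`, `max_seq_length`, `train_split`, `text_column`, `target_column`) belong to dataset and model loading rather than to TrainingArguments.

```python
# Approximate TrainingArguments equivalent of training_params.json
# (an assumption based on parameter names, not AutoTrain internals).
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="output",                  # project_name
    learning_rate=3e-05,                  # lr
    num_train_epochs=1,                   # epochs
    per_device_train_batch_size=64,       # batch_size
    gradient_accumulation_steps=1,        # gradient_accumulation
    warmup_ratio=0.1,
    weight_decay=0.0,
    max_grad_norm=1.0,
    seed=42,
    optim="adamw_torch",                  # optimizer
    lr_scheduler_type="linear",           # scheduler
    fp16=True,
    save_total_limit=1,
    save_strategy="epoch",
    auto_find_batch_size=False,
    push_to_hub=True,
    hub_model_id="abhishek/txtclfimdb3",  # repo_id
)
```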