scottsuk0306 committed
Commit a3de817
1 Parent(s): abb2d09

Model save

README.md CHANGED
@@ -3,16 +3,12 @@ library_name: transformers
  license: apache-2.0
  base_model: alignment-handbook/zephyr-7b-sft-full
  tags:
- - alignment-handbook
- - trl
- - sft
- - generated_from_trainer
  - trl
  - sft
  - alignment-handbook
  - generated_from_trainer
  datasets:
- - scottsuk0306/DepthQA
+ - generator
  model-index:
  - name: zephyr-7b-stem-train
    results: []
@@ -23,9 +19,9 @@ should probably proofread and complete it, then remove this comment. -->

  # zephyr-7b-stem-train

- This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the scottsuk0306/DepthQA dataset.
+ This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the generator dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.0544
+ - Loss: 0.4948

  ## Model description

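For quick verification of the updated card, a minimal sketch of loading the checkpoint for chat-style generation. The repo id `scottsuk0306/zephyr-7b-stem-train` is an assumption based on the model-index name and the committer's namespace; it is not stated in the diff.

```python
# Minimal sketch, not part of this commit. The repo id below is assumed from the
# card's model-index name and the committer's namespace.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "scottsuk0306/zephyr-7b-stem-train"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

# Zephyr checkpoints ship a chat template, so apply_chat_template builds the prompt.
messages = [{"role": "user", "content": "State Newton's second law in one sentence."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```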
all_results.json CHANGED
@@ -7,8 +7,8 @@
  "eval_steps_per_second": 0.211,
  "total_flos": 8375186227200.0,
  "train_loss": 0.0,
- "train_runtime": 1.4867,
- "train_samples": 424,
- "train_samples_per_second": 208.516,
- "train_steps_per_second": 6.726
+ "train_runtime": 1.4498,
+ "train_samples": 851,
+ "train_samples_per_second": 331.078,
+ "train_steps_per_second": 6.897
  }
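Since all_results.json (and train_results.json below) are plain JSON, the before/after metrics can be compared directly; a minimal sketch, assuming a local clone of the repo:

```python
# Minimal sketch: print the training throughput fields that this commit updates,
# assuming all_results.json sits in the current working directory.
import json

with open("all_results.json") as f:
    results = json.load(f)
for key in ("train_runtime", "train_samples", "train_samples_per_second", "train_steps_per_second"):
    print(key, results[key])
```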
config.json CHANGED
@@ -22,6 +22,6 @@
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.44.2",
- "use_cache": true,
+ "use_cache": false,
  "vocab_size": 32000
  }
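The only config.json change is `use_cache` flipping to false, which the Trainer typically does when gradient checkpointing is enabled, since the KV cache cannot be used during checkpointed training. For inference it can simply be switched back on; a minimal sketch, again assuming the repo id above:

```python
# Minimal sketch: re-enable the key/value cache for generation. use_cache=false in
# config.json is a training-time setting; it does not need to stay off at inference.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("scottsuk0306/zephyr-7b-stem-train")  # assumed repo id
model.config.use_cache = True  # faster autoregressive decoding
```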
train_results.json CHANGED
@@ -2,8 +2,8 @@
  "epoch": 10.0,
  "total_flos": 8375186227200.0,
  "train_loss": 0.0,
- "train_runtime": 1.4867,
- "train_samples": 424,
- "train_samples_per_second": 208.516,
- "train_steps_per_second": 6.726
+ "train_runtime": 1.4498,
+ "train_samples": 851,
+ "train_samples_per_second": 331.078,
+ "train_steps_per_second": 6.897
  }
trainer_state.json CHANGED
@@ -106,9 +106,9 @@
  "step": 10,
  "total_flos": 8375186227200.0,
  "train_loss": 0.0,
- "train_runtime": 1.4867,
- "train_samples_per_second": 208.516,
- "train_steps_per_second": 6.726
+ "train_runtime": 1.4498,
+ "train_samples_per_second": 331.078,
+ "train_steps_per_second": 6.897
  }
  ],
  "logging_steps": 5,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c932eb51d0afc6b0a54ae5af2685e50851245e4ce74b9d4302d9db0152f98346
+ oid sha256:fafca06e48dba2cf4dbebe50db64deb5d5992b81d6b82eb5e94b3b870e96afb2
  size 6968
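training_args.bin is stored through Git LFS and only its pointer changes here; the file itself is a pickled `transformers.TrainingArguments` object, so the hyperparameters behind the new run can be inspected locally. A minimal sketch, assuming the file has been pulled from LFS:

```python
# Minimal sketch: inspect the serialized TrainingArguments. Recent PyTorch versions
# default to weights_only=True, which cannot unpickle arbitrary objects, so it is
# disabled here (only do this for files you trust; transformers must be installed).
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)
```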