terry69 committed
Commit 62f00bf
1 Parent(s): d18afb1

Model save

README.md CHANGED
@@ -3,16 +3,10 @@ library_name: transformers
 license: llama3.2
 base_model: meta-llama/Llama-3.2-1B-Instruct
 tags:
-- alignment-handbook
-- trl
-- sft
-- generated_from_trainer
 - trl
 - sft
 - alignment-handbook
 - generated_from_trainer
-datasets:
-- preference-data
 model-index:
 - name: llama3.2_feedback_1b
   results: []
@@ -23,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # llama3.2_feedback_1b
 
-This model is a fine-tuned version of [meta-llama/Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct) on the preference-data dataset.
+This model is a fine-tuned version of [meta-llama/Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct) on the None dataset.
 
 ## Model description
 
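As context for the model card above, a minimal usage sketch follows. The Hub repo id `terry69/llama3.2_feedback_1b` is an assumption inferred from the committer and the model name and may differ from the actual repository path.

```python
# Minimal usage sketch (assumed repo id; adjust to the actual Hub path).
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "terry69/llama3.2_feedback_1b"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype="bfloat16")

messages = [{"role": "user", "content": "Give one sentence of feedback on my intro paragraph."}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
output = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```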
all_results.json CHANGED
@@ -6,8 +6,8 @@
     "eval_steps_per_second": 2.241,
     "total_flos": 44008382398464.0,
     "train_loss": 0.0,
-    "train_runtime": 0.009,
+    "train_runtime": 0.0097,
     "train_samples": 98952,
-    "train_samples_per_second": 2937055.056,
-    "train_steps_per_second": 183614.45
+    "train_samples_per_second": 2738278.668,
+    "train_steps_per_second": 171187.643
 }
config.json CHANGED
@@ -35,6 +35,6 @@
   "tie_word_embeddings": true,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.45.1",
-  "use_cache": true,
+  "use_cache": false,
   "vocab_size": 128256
 }
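Setting `use_cache` to false in config.json is often a side effect of training with gradient checkpointing; the KV cache can be switched back on for generation. A minimal sketch, assuming the standard transformers API and the same hypothetical repo id as above:

```python
# Minimal sketch: re-enable the KV cache for faster autoregressive generation
# after loading a checkpoint whose config.json carries "use_cache": false.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("terry69/llama3.2_feedback_1b")  # hypothetical repo id
model.config.use_cache = True   # persistent default for this model instance
# Alternatively, override per generate call:
# outputs = model.generate(input_ids, use_cache=True)
```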
runs/Sep30_22-11-00_COE-CS-sv003/events.out.tfevents.1727734271.COE-CS-sv003.1385721.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fe4e1ac17388c5b2af2e837fa18dc3318a8f18c87d49e9b0e4bb7e34d8a432c
+size 6324
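The added file is a Git LFS pointer to a TensorBoard event log for this run. A minimal sketch of reading the logged scalars locally, assuming the binary has been fetched (for example with `git lfs pull`) and the `tensorboard` package is installed:

```python
# Minimal sketch: list the scalar tags logged in the tfevents file.
# Assumes the event file has been pulled from Git LFS into the runs/ directory.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Sep30_22-11-00_COE-CS-sv003")  # directory containing the event file
ea.Reload()                                                # parse the log
print(ea.Tags()["scalars"])                                # e.g. loss / learning-rate tags
```

Pointing `tensorboard --logdir runs/` at the same directory gives the interactive view.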
train_results.json CHANGED
@@ -2,8 +2,8 @@
     "epoch": 1.0,
     "total_flos": 44008382398464.0,
     "train_loss": 0.0,
-    "train_runtime": 0.009,
+    "train_runtime": 0.0097,
     "train_samples": 98952,
-    "train_samples_per_second": 2937055.056,
-    "train_steps_per_second": 183614.45
+    "train_samples_per_second": 2738278.668,
+    "train_steps_per_second": 171187.643
 }
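The throughput fields above are derived from the measured wall-clock runtime, so they should line up roughly with the global step count recorded in trainer_state.json (1656); the logged runtime is rounded, so the check is only approximate:

```python
# Approximate consistency check on the logged throughput numbers.
runtime_s = 0.0097       # "train_runtime" from train_results.json (rounded)
global_steps = 1656      # "step" from trainer_state.json
print(global_steps / runtime_s)   # ~1.7e5, same order as "train_steps_per_second"
```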
trainer_state.json CHANGED
@@ -2344,9 +2344,9 @@
       "step": 1656,
       "total_flos": 44008382398464.0,
       "train_loss": 0.0,
-      "train_runtime": 0.009,
-      "train_samples_per_second": 2937055.056,
-      "train_steps_per_second": 183614.45
+      "train_runtime": 0.0097,
+      "train_samples_per_second": 2738278.668,
+      "train_steps_per_second": 171187.643
     }
   ],
   "logging_steps": 5,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8bc6ff02ea07019360ef1bfe0b453307c77a2f719e73135d435326789efd9b15
+oid sha256:b5bc2d6c482510bf6b354a53e1315f403620e02f71ff4e5ae0f9fa8faf852860
 size 7096
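training_args.bin is the pickled TrainingArguments object that the Trainer saves next to the weights, hence the changed LFS object id. A minimal sketch for inspecting it, assuming the binary has been fetched from LFS:

```python
# Minimal sketch: inspect the serialized TrainingArguments.
# weights_only=False is needed on recent PyTorch because the file pickles a
# TrainingArguments object rather than plain tensors.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```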