chohi committed
Commit 0378ba2
1 Parent(s): da3d9ff

chohi/Llama-test-Ko-3-8B
README.md CHANGED
@@ -5,7 +5,7 @@ tags:
 - trl
 - sft
 - generated_from_trainer
-base_model: beomi/Llama-3-Open-Ko-8B
+base_model: meta-llama/Meta-Llama-3-8B
 model-index:
 - name: results
   results: []
@@ -16,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # results
 
-This model is a fine-tuned version of [beomi/Llama-3-Open-Ko-8B](https://huggingface.co/beomi/Llama-3-Open-Ko-8B) on an unknown dataset.
+This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on an unknown dataset.
 
 ## Model description
 
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "beomi/Llama-3-Open-Ko-8B",
+  "base_model_name_or_path": "meta-llama/Meta-Llama-3-8B",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b62f354588269602db181512910c3cb0ed78951303d91973da3a50e36cdb3291
+oid sha256:5d9b5666cb86aa361b546261210825407a64e7b345d06ef9a4130e460f37ef41
 size 109069176
runs/May08_20-03-00_db756d1ad56c/events.out.tfevents.1715198581.db756d1ad56c.607.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ccfab264e7f7b65d554bfbb3c059e97bcd0cc52da490a35bfe3a2399ae1e23c9
+size 6161
tokenizer_config.json CHANGED
@@ -2050,7 +2050,6 @@
     }
   },
   "bos_token": "<|begin_of_text|>",
-  "chat_template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|end_of_text|>",
   "model_input_names": [
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1f6f53cc0512ff2036c2f8aa93d2846b856f1d75697c45560d958e5b3570f8b7
+oid sha256:4411bed28119bf1d109631a50f48615e4cea0882da43936974a8109076f24f51
 size 4984