Cahya Wirawan committed on
Commit cf6909c
Parent(s): b080d5b

add llama model

adapter_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "base_model_name_or_path": "/fsx/cahya/Work/models/llama-7b-hf/",
+   "bias": "none",
+   "enable_lora": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "lora_alpha": 32,
+   "lora_dropout": 0.1,
+   "merge_weights": false,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
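For context, this adapter_config.json describes a rank-8 LoRA adapter (lora_alpha 32, dropout 0.1) applied to the q_proj and v_proj attention projections of a LLaMA-7B base model, saved in inference mode. Below is a minimal loading sketch using peft and transformers; the base-model path and adapter repo ID are placeholders, since the path in the config points to a private filesystem.

```python
# A minimal loading sketch, assuming the peft and transformers packages.
# The base-model path and adapter repo ID below are hypothetical placeholders:
# the path recorded in adapter_config.json points to a private filesystem,
# so substitute a LLaMA-7B checkpoint you can actually access.
import torch
from peft import PeftModel
from transformers import LlamaForCausalLM

base_model_path = "/path/to/llama-7b-hf"  # placeholder for the private path

# Load the frozen base model, then inject the rank-8 LoRA weights into the
# q_proj/v_proj projections listed in target_modules.
model = LlamaForCausalLM.from_pretrained(base_model_path, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(model, "username/llama-7b-lora")  # hypothetical repo ID
model.eval()  # adapter_config.json sets inference_mode to true
```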
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84c13a07439a8ce580bc510ec17e28477f2ac5b67f2c6bd4e40c0911bb2f333b
+ size 16797565
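Note that the committed file is only a Git LFS pointer (spec version, sha256 oid, and size, about 16 MB); the binary weights themselves live in LFS storage. In a local clone, `git lfs pull` fetches them; from Python, a download sketch with huggingface_hub, assuming a hypothetical repo ID:

```python
# A download sketch using huggingface_hub; the repo ID is a hypothetical
# placeholder for wherever this adapter is published.
from huggingface_hub import hf_hub_download

adapter_path = hf_hub_download(
    repo_id="username/llama-7b-lora",  # hypothetical repo ID
    filename="adapter_model.bin",
)
print(adapter_path)  # local cache path of the ~16 MB LoRA weights
```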
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {}
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "",
+   "model_max_length": 1000000000000000019884624838656,
+   "special_tokens_map_file": "/fsx/cahya/Work/models/llama-7b-hf/special_tokens_map.json",
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": ""
+ }
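Two details of this tokenizer_config.json are worth noting: model_max_length is set to transformers' VERY_LARGE_INTEGER sentinel (int(1e30)), meaning the tokenizer itself enforces no length cap, and the bos/eos/unk tokens are empty strings, as in early llama-7b-hf conversions. A minimal usage sketch, again with a hypothetical repo ID:

```python
# A minimal tokenizer sketch; the repo ID is a hypothetical placeholder.
# Requires the sentencepiece package for LlamaTokenizer and torch for
# return_tensors="pt".
from transformers import LlamaTokenizer

tokenizer = LlamaTokenizer.from_pretrained("username/llama-7b-lora")  # hypothetical
ids = tokenizer("Hello, world!", return_tensors="pt").input_ids
print(ids.shape)                 # (1, sequence_length)
print(tokenizer.decode(ids[0]))  # round-trip back to text
```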