mahimairaja committed on
Commit 9391d28
1 Parent(s): 85a9597

Update config.json

Files changed (1)
  1. config.json +79 -25
config.json CHANGED
@@ -1,25 +1,79 @@
- {
-   "_name_or_path": "mahimairaja/tweet-summarization-llama-2-finetuned",
-   "architectures": [
-     "PeftModelForCausalLM"
-   ],
-   "bos_token_id": 1,
-   "eos_token_id": 2,
-   "hidden_act": "silu",
-   "hidden_size": 4096,
-   "initializer_range": 0.02,
-   "intermediate_size": 11008,
-   "max_position_embeddings": 4096,
-   "model_type": "llama",
-   "num_attention_heads": 32,
-   "num_hidden_layers": 32,
-   "num_key_value_heads": 32,
-   "pretraining_tp": 1,
-   "rms_norm_eps": 1e-05,
-   "rope_scaling": null,
-   "tie_word_embeddings": false,
-   "torch_dtype": "float16",
-   "transformers_version": "4.31.0.dev0",
-   "use_cache": true,
-   "vocab_size": 32000
- }
+ {'vocab_size': 32000,
+  'max_position_embeddings': 4096,
+  'hidden_size': 4096,
+  'intermediate_size': 11008,
+  'num_hidden_layers': 32,
+  'num_attention_heads': 32,
+  'num_key_value_heads': 32,
+  'hidden_act': 'silu',
+  'initializer_range': 0.02,
+  'rms_norm_eps': 1e-05,
+  'pretraining_tp': 1,
+  'use_cache': False,
+  'rope_scaling': None,
+  'return_dict': True,
+  'output_hidden_states': False,
+  'output_attentions': False,
+  'torchscript': False,
+  'torch_dtype': 'float16',
+  'use_bfloat16': False,
+  'tf_legacy_loss': False,
+  'pruned_heads': {},
+  'tie_word_embeddings': False,
+  'is_encoder_decoder': False,
+  'is_decoder': False,
+  'cross_attention_hidden_size': None,
+  'add_cross_attention': False,
+  'tie_encoder_decoder': False,
+  'max_length': 20,
+  'min_length': 0,
+  'do_sample': False,
+  'early_stopping': False,
+  'num_beams': 1,
+  'num_beam_groups': 1,
+  'diversity_penalty': 0.0,
+  'temperature': 1.0,
+  'top_k': 50,
+  'top_p': 1.0,
+  'typical_p': 1.0,
+  'repetition_penalty': 1.0,
+  'length_penalty': 1.0,
+  'no_repeat_ngram_size': 0,
+  'encoder_no_repeat_ngram_size': 0,
+  'bad_words_ids': None,
+  'num_return_sequences': 1,
+  'chunk_size_feed_forward': 0,
+  'output_scores': False,
+  'return_dict_in_generate': False,
+  'forced_bos_token_id': None,
+  'forced_eos_token_id': None,
+  'remove_invalid_values': False,
+  'exponential_decay_length_penalty': None,
+  'suppress_tokens': None,
+  'begin_suppress_tokens': None,
+  'architectures': ['LlamaForCausalLM'],
+  'finetuning_task': None,
+  'id2label': {0: 'LABEL_0', 1: 'LABEL_1'},
+  'label2id': {'LABEL_0': 0, 'LABEL_1': 1},
+  'tokenizer_class': None,
+  'prefix': None,
+  'bos_token_id': 1,
+  'pad_token_id': None,
+  'eos_token_id': 2,
+  'sep_token_id': None,
+  'decoder_start_token_id': None,
+  'task_specific_params': None,
+  'problem_type': None,
+  '_name_or_path': 'mahimairaja/tweet-summarization-llama-2-finetuned',
+  'transformers_version': '4.32.1',
+  'model_type': 'llama',
+  'quantization_config': {'quant_method': <QuantizationMethod.BITS_AND_BYTES: 'bitsandbytes'>,
+   'load_in_8bit': False,
+   'load_in_4bit': True,
+   'llm_int8_threshold': 6.0,
+   'llm_int8_skip_modules': None,
+   'llm_int8_enable_fp32_cpu_offload': False,
+   'llm_int8_has_fp16_weight': False,
+   'bnb_4bit_quant_type': 'nf4',
+   'bnb_4bit_use_double_quant': False,
+   'bnb_4bit_compute_dtype': 'float16'}}
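
Note: the new file contents above are a Python dict repr, not valid JSON. Keys and strings are single-quoted, booleans and nulls are spelled `True`/`False`/`None`, and `quant_method` appears as an enum repr, so `json.load` (and therefore `AutoConfig.from_pretrained`) cannot parse the file as committed. Below is a minimal sketch, built only from values shown in the hunk above, of how an equivalent and valid `config.json` could be regenerated through the `transformers` API; the output file name follows the standard Hub layout.

```python
# Sketch (not part of this commit): rebuild a valid config.json from the
# values in the diff above.
import torch
from transformers import BitsAndBytesConfig, LlamaConfig

# Model architecture values copied from the diff.
config = LlamaConfig(
    vocab_size=32000,
    hidden_size=4096,
    intermediate_size=11008,
    num_hidden_layers=32,
    num_attention_heads=32,
    num_key_value_heads=32,
    max_position_embeddings=4096,
    rms_norm_eps=1e-05,
    pretraining_tp=1,
    bos_token_id=1,
    eos_token_id=2,
    tie_word_embeddings=False,
    use_cache=False,
    torch_dtype="float16",
)
config._name_or_path = "mahimairaja/tweet-summarization-llama-2-finetuned"

# Quantization settings from the 'quantization_config' block of the diff.
bnb = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float16,
    llm_int8_threshold=6.0,
)
config.quantization_config = bnb.to_dict()

# use_diff=False writes every field, not just those differing from the
# LlamaConfig defaults.
config.to_json_file("config.json", use_diff=False)
```

`to_json_file` emits real JSON (double-quoted keys, `true`/`false`/`null`), and since `QuantizationMethod` subclasses `str` in `transformers`, JSON serialization reduces the enum to the plain string "bitsandbytes" instead of the repr shown in the diff.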