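# Experiment configuration (H2O LLM Studio-style cfg.yaml) for LoRA fine-tuning
# of meta-llama/Meta-Llama-3-8B on a causal language modeling task.

# architecture: how the backbone is loaded. Weights come in as bfloat16, and
# gradient checkpointing is on, trading recompute for lower activation memory.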
architecture:
    backbone_dtype: bfloat16
    force_embedding_gradients: false
    gradient_checkpointing: true
    intermediate_dropout: 0.0
    pretrained: true
    pretrained_weights: ''
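# augmentation: all data augmentations (NEFTune noise, token masking, parent
# swapping/skipping) are disabled at 0.0.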
augmentation:
    neftune_noise_alpha: 0.0
    random_parent_probability: 0.0
    skip_parent_probability: 0.0
    token_mask_probability: 0.0
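# dataset: prompts are read from the "User Prompt" column, targets from
# "Sample Answer", plus an optional "System Prompt" column; 1% of the training
# dataframe is split off automatically for validation. With the template fields
# below and EOS appended to each part, one training example is assembled
# roughly as (a sketch of the usual formatting, not taken verbatim from this file):
#   <|system|>{System Prompt}<eos><|prompt|>{User Prompt}<eos><|answer|>{Sample Answer}<eos>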
dataset:
    add_eos_token_to_answer: true
    add_eos_token_to_prompt: true
    add_eos_token_to_system: true
    answer_column: Sample Answer
    chatbot_author: ''
    chatbot_name: ''
    data_sample: 1.0
    data_sample_choice:
    - Train
    - Validation
    limit_chained_samples: false
    mask_prompt_labels: true
    parent_id_column: None
    personalize: false
    prompt_column:
    - User Prompt
    system_column: System Prompt
    text_answer_separator: <|answer|>
    text_prompt_start: <|prompt|>
    text_system_start: <|system|>
    train_dataframe: /app/train_df.csv
    validation_dataframe: ''
    validation_size: 0.01
    validation_strategy: automatic
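# environment: single-GPU run (device '0'); model compilation and DeepSpeed
# are off, so the deepspeed_* keys below are inert defaults.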
environment:
    compile_model: false
    deepspeed_offload_optimizer: false
    deepspeed_reduce_bucket_size: 10000000.0
    deepspeed_stage3_max_live_parameters: 10000000.0
    deepspeed_stage3_max_reuse_distance: 10000000.0
    deepspeed_stage3_param_persistence_threshold: 10000000.0
    deepspeed_stage3_prefetch_bucket_size: 10000000.0
    find_unused_parameters: false
    gpus:
    - '0'
    huggingface_branch: main
    mixed_precision: false
    number_of_workers: 8
    seed: -1
    trust_remote_code: false
    use_deepspeed: false
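# Run identity and logging: no external tracker is configured (logger: None),
# so artifacts are written only under output_directory.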
experiment_name: Fatima HR
llm_backbone: meta-llama/Meta-Llama-3-8B
logging:
    logger: None
    neptune_project: ''
output_directory: /app/output
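# prediction: greedy decoding (do_sample: false, num_beams: 1), scored by
# perplexity; with sampling off, temperature/top_k/top_p below have no effect.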
prediction:
    batch_size_inference: 0
    do_sample: false
    max_length_inference: 5000
    metric: Perplexity
    metric_gpt_model: gpt-3.5-turbo-0301
    metric_gpt_template: general
    min_length_inference: 2
    num_beams: 1
    num_history: 4
    repetition_penalty: 1.2
    stop_tokens: ''
    temperature: 0.1
    top_k: 0
    top_p: 1.0
problem_type: text_causal_language_modeling
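# tokenizer: sequences are capped at 4000 tokens overall, with the prompt
# portion truncated to 2560 tokens and the answer to 4000.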
tokenizer:
    add_prefix_space: false
    add_prompt_answer_tokens: false
    max_length: 4000
    max_length_answer: 4000
    max_length_prompt: 2560
    padding_quantile: 1.0
    use_fast: true
tracking_mode: after_epoch
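# training: 2 epochs of LoRA (r=4, alpha=16, dropout 0.05) with AdamW at
# lr 1e-4 and a cosine schedule. Effective batch size is
# batch_size (4) x grad_accumulation (1) x 1 GPU = 4 samples per step.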
training:
    alpha: 0.99
    batch_size: 4
    beta1: 0.9
    beta2: 0.999
    differential_learning_rate: 1.0e-05
    differential_learning_rate_layers: []
    drop_last_batch: true
    epochs: 2
    eps: 1.0e-08
    evaluate_before_training: false
    evaluation_epochs: 1.0
    grad_accumulation: 1
    gradient_clip: 0.5
    learning_rate: 0.0001
    lora: true
    lora_alpha: 16
    lora_dropout: 0.05
    lora_r: 4
    lora_target_modules: []
    loss_function: TokenAveragedCrossEntropy
    momentum: 0.0
    nesterov: false
    optimizer: AdamW
    rho: 0.9
    save_best_checkpoint: false
    schedule: Cosine
    train_validation_data: false
    use_flash_attention_2: false
    warmup_epochs: 0.0
    weight_decay: 0.0