# ################################
# Model: Llama2 Model + NLL
# Authors:
# Pooneh Mousavi 2023
# ################################
# URL for the llama2 model
model_hub: meta-llama/Llama-2-7b-chat-hf
llama2_folder: recipes/MultiWOZ/response_generation/llama2/results/train_with_llama2/1995/save/llama2_checkpoint/
# history_window, i.e. how many user-system exchanges are considered as context.
max_history: 2
# decoder settings
freeze_model: True
num_beams: 8
max_new_tokens: 50
top_k: 45
top_p: 0.9
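# Note: freeze_model toggles whether the LLaMA2 backbone weights stay frozen,
# while num_beams, max_new_tokens, top_k and top_p are generation-time settings
# (beam width, response length cap, top-k / nucleus sampling thresholds); all
# of them are passed to the model wrapper defined below.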
# LLAMA2 model
model: !new:custom.LLAMA2_expanded
    source: !ref <model_hub>
    freeze: !ref <freeze_model>
    save_path: !ref <llama2_folder>
    max_new_tokens: !ref <max_new_tokens>
    num_beams: !ref <num_beams>
    top_k: !ref <top_k>
    top_p: !ref <top_p>
    with_peft: True
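# with_peft: True asks the LLAMA2_expanded wrapper (see custom.py in this
# recipe) to fine-tune the model through parameter-efficient adapters
# (e.g. LoRA) rather than updating all 7B parameters; the exact PEFT
# configuration lives in custom.py.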
# Masks
padding_mask: !name:speechbrain.lobes.models.transformer.Transformer.get_key_padding_mask
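# get_key_padding_mask builds a boolean mask over padded token positions so the
# training script can ignore padding (e.g. in attention and loss computation).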
pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        model: !ref <model>

modules:
    model: !ref <model>
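# The Pretrainer lists the objects whose parameters can be restored from a
# saved checkpoint (here only the model); the recipe script typically calls
# pretrainer.collect_files() and pretrainer.load_collected() before training or
# inference. `modules` lists the torch modules handed to the SpeechBrain Brain
# class, which moves them to the right device and switches train/eval modes.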