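"""Model presets for the PECoRe demo.

Each ``set_*_preset`` function returns a tuple of values; the inline comments
name the demo field each position corresponds to (model name, prompt
templates, separators, and JSON-encoded kwargs).
"""
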
def set_cora_preset():
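    """Preset for CORA multilingual QA generation: <Q> marks the question, <P> the retrieved passage."""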
    return (
        "gsarti/cora_mgen",  # model_name_or_path
        "<Q>:{current} <P>:{context}",  # input_template
        "<Q>:{current}",  # input_current_text_template
    )


def set_default_preset():
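    """Default preset (GPT-2): defines the full set of configurable fields."""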
    return (
        "gpt2",  # model_name_or_path
        "{current} {context}",  # input_template
        "{current}",  # input_current_text_template
        "{current}",  # output_template
        [],  # special_tokens_to_keep
        "",  # decoder_input_output_separator
        "{}",  # model_kwargs
        "{}",  # tokenizer_kwargs
        "{}",  # generation_kwargs
        "{}",  # attribution_kwargs
    )


def set_zephyr_preset():
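    """Preset for StableLM 2 Zephyr 1.6B, using the Zephyr chat template."""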
    return (
        "stabilityai/stablelm-2-zephyr-1_6b",  # model_name_or_path
        "<|system|>\n{context}</s>\n<|user|>\n{current}</s>\n<|assistant|>\n",  # input_template
        "<|user|>\n{current}</s>\n<|assistant|>\n",  # input_current_text_template
        "\n",  # decoder_input_output_separator
    )


def set_chatml_preset():
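    """Preset for Qwen1.5 0.5B Chat, using the ChatML prompt format."""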
    return (
        "Qwen/Qwen1.5-0.5B-Chat",  # model_name_or_path
        "<|im_start|>system\n{context}<|im_end|>\n<|im_start|>user\n{current}<|im_end|>\n<|im_start|>assistant\n",  # input_template
        "<|im_start|>user\n{current}<|im_end|>\n<|im_start|>assistant\n",  # input_current_text_template
        "",  # decoder_input_output_separator
        ["<|im_start|>", "<|im_end|>"],  # special_tokens_to_keep
    )


def set_mmt_preset():
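    """Preset for mBART-50 one-to-many MMT, configured for English-to-French translation."""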
    return (
        "facebook/mbart-large-50-one-to-many-mmt",  # model_name_or_path
        "{context} {current}",  # input_template
        "{context} {current}",  # output_template
        '{\n\t"src_lang": "en_XX",\n\t"tgt_lang": "fr_XX"\n}',  # tokenizer_kwargs
    )


def set_towerinstruct_preset():
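    """Preset for TowerInstruct 7B, prompting for context-aware translation into French."""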
    return (
        "Unbabel/TowerInstruct-7B-v0.1",  # model_name_or_path
        "<|im_start|>user\nSource: {current}\nContext: {context}\nTranslate the above text into French. Use the context to guide your answer.\nTarget:<|im_end|>\n<|im_start|>assistant\n",  # input_template
        "<|im_start|>user\nSource: {current}\nTranslate the above text into French.\nTarget:<|im_end|>\n<|im_start|>assistant\n",  # input_current_text_template
        "",  # decoder_input_output_separator
        ["<|im_start|>", "<|im_end|>"],  # special_tokens_to_keep
    )


def set_gemma_preset():
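    """Preset for Gemma 2B Instruct, using Gemma's turn-based chat format."""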
    return (
        "google/gemma-2b-it",  # model_name_or_path
        "<start_of_turn>user\n{context}\n{current}<end_of_turn>\n<start_of_turn>model\n",  # input_template
        "<start_of_turn>user\n{current}<end_of_turn>\n<start_of_turn>model\n",  # input_current_text_template
        "",  # decoder_input_output_separator
        ["<start_of_turn>", "<end_of_turn>"],  # special_tokens_to_keep
    )
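

# Usage sketch (an assumption, not part of the upstream demo code): each preset
# tuple appears designed to be returned from a Gradio event handler, filling a
# matching list of output components positionally. The component variables
# below are illustrative names, not the demo's actual ones.
if __name__ == "__main__":
    import gradio as gr

    with gr.Blocks() as demo:
        model_name_or_path = gr.Textbox(label="Model name or path")
        input_template = gr.Textbox(label="Input template")
        input_current_text_template = gr.Textbox(label="Input current text template")
        cora_btn = gr.Button("Load CORA preset")
        # set_cora_preset returns three values, mapped onto the three outputs.
        cora_btn.click(
            set_cora_preset,
            inputs=[],
            outputs=[model_name_or_path, input_template, input_current_text_template],
        )
    demo.launch()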