# Make training more memory efficient by monkey patching the LLaMA model
# with xformers attention.
# This needs to be called before importing transformers.
from fastchat.train.llama_xformers_attn_monkey_patch import (
    replace_llama_attn_with_xformers_attn,
)

replace_llama_attn_with_xformers_attn()

from fastchat.train.train import train

if __name__ == "__main__":
    train()
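
# ---------------------------------------------------------------------------
# For context, a minimal sketch of what a patch like this typically does.
# This is NOT FastChat's actual implementation (that lives in
# fastchat/train/llama_xformers_attn_monkey_patch.py); it omits rotary
# position embeddings, attention masks beyond causality, and KV caching,
# and assumes a 2023-era transformers where LlamaAttention exposes
# q_proj/k_proj/v_proj/o_proj, num_heads, and head_dim. It illustrates only
# the class-level patching mechanism and the xformers call.
# ---------------------------------------------------------------------------
import xformers.ops as xops
import transformers.models.llama.modeling_llama as llama_modeling


def _sketch_xformers_forward(self, hidden_states, **kwargs):
    # Project to q/k/v with the module's own weights, then reshape to the
    # (batch, seq_len, n_heads, head_dim) layout xformers expects.
    bsz, seq_len, _ = hidden_states.size()
    q = self.q_proj(hidden_states).view(bsz, seq_len, self.num_heads, self.head_dim)
    k = self.k_proj(hidden_states).view(bsz, seq_len, self.num_heads, self.head_dim)
    v = self.v_proj(hidden_states).view(bsz, seq_len, self.num_heads, self.head_dim)

    # memory_efficient_attention never materializes the full
    # (seq_len x seq_len) attention matrix, which is where the memory
    # savings come from; LowerTriangularMask makes the attention causal.
    out = xops.memory_efficient_attention(
        q, k, v, attn_bias=xops.LowerTriangularMask()
    )
    out = out.reshape(bsz, seq_len, self.num_heads * self.head_dim)
    # Older transformers expect (attn_output, attn_weights, past_key_value).
    return self.o_proj(out), None, None


def _sketch_replace_llama_attn_with_xformers_attn():
    # Class-level assignment: every LlamaAttention built afterwards (e.g.
    # when fastchat.train.train constructs the model) picks up the patched
    # forward, which is why the patch must run before training starts.
    llama_modeling.LlamaAttention.forward = _sketch_xformers_forward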