gpt-j-6b-8bit-lora / config.py
from transformers import GPTJConfig


class GPTJLoraConfig(GPTJConfig):
    """GPT-J configuration extended with a flag for attaching LoRA adapters."""

    model_type = "gptj-lora"

    def __init__(self, add_adapters=False, **kwargs):
        # Whether LoRA adapter layers should be added to the base GPT-J model.
        self.add_adapters = add_adapters
        super().__init__(**kwargs)
        self.model_type = "gptj-lora"
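
Below is a minimal usage sketch, not part of the original file, showing how this config might be instantiated and optionally registered with transformers' AutoConfig machinery; the registration step is an assumption about typical use of custom configs, not something the file itself performs.

    from transformers import AutoConfig

    # Instantiate the custom config with adapters enabled.
    config = GPTJLoraConfig(add_adapters=True)
    print(config.model_type)    # "gptj-lora"
    print(config.add_adapters)  # True

    # Optional (assumed usage): make the "gptj-lora" model type resolvable
    # through AutoConfig.from_pretrained by registering the class.
    AutoConfig.register("gptj-lora", GPTJLoraConfig)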