gpt-j-6b-8bit-lora / config.py
from transformers import GPTJConfig


class GPTJLoraConfig(GPTJConfig):
    """GPT-J configuration extended with a flag for LoRA adapter layers."""

    model_type = "gptj-lora"

    def __init__(self, add_adapters=False, **kwargs):
        # Whether LoRA adapter layers should be added to the model.
        self.add_adapters = add_adapters
        super().__init__(**kwargs)
        # Re-assert the custom model type on the instance after the parent init.
        self.model_type = "gptj-lora"
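

# Minimal usage sketch (an illustration, not part of the library API surface):
# register the custom config under its model type with AutoConfig, assuming this
# module is importable, then instantiate it with the adapter flag enabled.
if __name__ == "__main__":
    from transformers import AutoConfig

    AutoConfig.register("gptj-lora", GPTJLoraConfig)
    config = GPTJLoraConfig(add_adapters=True)
    print(config.model_type, config.add_adapters)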