tahauzumcu commited on
Commit
f6b1b9c
1 Parent(s): 8fc3926

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -4
README.md CHANGED
@@ -13,7 +13,7 @@ Base Model: unsloth/llama-3-8b-bnb-4bit
13
 
14
 Training Dataset: A combined dataset of alpaca, dolly, and bactrainx, which is translated to Turkish.
15
 
16
- Training Method: Fine-tuned with Unsloth, which uses QLoRA. Used ORPO
17
 
18
  #TrainingArguments\
19
  PER_DEVICE_BATCH_SIZE: 2\
@@ -52,12 +52,11 @@ How to load with unsloth:
52
  ```commandline
53
  from unsloth import FastLanguageModel
54
 
55
- max_seq_len = 2048
56
  model, tokenizer = FastLanguageModel.from_pretrained(
57
  model_name="VeriUs/VeriUS-LLM-8b-v0.2",
58
  max_seq_length=max_seq_len,
59
- dtype=None,
60
- load_in_4bit=True
61
  )
62
  FastLanguageModel.for_inference(model) # Enable native 2x faster inference
63
 
 
13
 
14
 Training Dataset: A combined dataset of alpaca, dolly, and bactrainx, which is translated to Turkish.
15
 
16
+ Training Method: Fine-tuned with Unsloth, QLoRA and ORPO
17
 
18
  #TrainingArguments\
19
  PER_DEVICE_BATCH_SIZE: 2\
 
52
  ```commandline
53
  from unsloth import FastLanguageModel
54
 
55
+ max_seq_len = 1024
56
  model, tokenizer = FastLanguageModel.from_pretrained(
57
  model_name="VeriUs/VeriUS-LLM-8b-v0.2",
58
  max_seq_length=max_seq_len,
59
+ dtype=None
 
60
  )
61
  FastLanguageModel.for_inference(model) # Enable native 2x faster inference
62