sosoai committed
Commit 34ce7d0
1 parent: 500fbf0

Create README.md

Files changed (1):
  1. README.md (+30, -0)
README.md ADDED
Base model: beomi-Llama-3-Open-Ko-8B-Instruct-preview
Base model (direct parent of this run): hansoldeco-beomi-Llama-3-Open-Ko-8B-Instruct-preview (trained via Axolotl)
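
If you want to try the checkpoint, the snippet below is a minimal inference sketch using the Hugging Face transformers API. It loads the parent checkpoint named above; substitute this repo's model id once the QDoRA-trained weights are published. The prompt, dtype, and generation settings are illustrative assumptions, not part of the original card, and the sketch assumes the tokenizer ships a Llama-3 style chat template.

```python
# Minimal inference sketch; model id, dtype, prompt, and generation settings are
# illustrative assumptions rather than settings documented in this card.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Parent checkpoint named above; swap in this repo's id for the QDoRA-trained model.
model_id = "sosoai/hansoldeco-beomi-Llama-3-Open-Ko-8B-Instruct-preview"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumes a bf16-capable GPU
    device_map="auto",
)

# Assumes the tokenizer ships a Llama-3 style chat template.
messages = [{"role": "user", "content": "간단한 수학 문제를 하나 풀어 주세요: 12 x 7은 얼마인가요?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output_ids = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```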

dora_train config (from the fsdp_qlora repo):

```bash
export CUDA_VISIBLE_DEVICES=0,1
python train.py \
  --train_type bnb_dora \
  --model_name sosoai/hansoldeco-beomi-Llama-3-Open-Ko-8B-Instruct-preview \
  --dataset orca_math \
  --dataset_samples 193789 \
  --batch_size 4 \
  --context_length 8192 \
  --gradient_accumulation_steps 2 \
  --sharding_strategy full_shard \
  --use_gradient_checkpointing true \
  --reentrant_checkpointing true \
  --use_cpu_offload false \
  --use_activation_cpu_offload false \
  --log_to wandb \
  --project_name "sosoai-fsdp-quantized-ft-exps" \
  --save_model true \
  --output_dir models/llama-8b-orca-math-10k-bnb-QDoRA
```
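
For a sense of scale, the arithmetic below works out the effective batch size and approximate optimizer steps per epoch implied by these flags. The GPU count (2, from CUDA_VISIBLE_DEVICES), the per-GPU interpretation of --batch_size, and the single-epoch assumption are inferences, not settings stated in the card.

```python
# Rough training-scale arithmetic implied by the command above.
# Assumptions (not stated in the card): 2 GPUs from CUDA_VISIBLE_DEVICES=0,1,
# --batch_size is per GPU, and training runs for a single epoch.
num_gpus = 2
per_gpu_batch_size = 4          # --batch_size
grad_accum_steps = 2            # --gradient_accumulation_steps
dataset_samples = 193_789       # --dataset_samples

effective_batch = per_gpu_batch_size * grad_accum_steps * num_gpus
steps_per_epoch = dataset_samples // effective_batch

print(f"effective batch size: {effective_batch}")                 # 16 sequences per optimizer step
print(f"optimizer steps per epoch (approx.): {steps_per_epoch}")  # ~12,111
```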

Dataset: hansoldeco in-house domain dataset (not publicly released)
Dataset: kuotient/orca-math-word-problems-193k-korean
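
The public half of the training mix can be inspected with the datasets library. This is only a browsing sketch: it assumes the dataset exposes a standard train split, and the column layout is whatever the dataset author published, not something asserted by this card.

```python
# Browse the public Korean orca-math dataset used for this run.
# The hansoldeco in-house dataset is not publicly released, so it cannot be loaded this way.
from datasets import load_dataset

# Assumes the dataset exposes a standard "train" split.
ds = load_dataset("kuotient/orca-math-word-problems-193k-korean", split="train")

print(ds)        # row count and column names as published by the dataset author
print(ds[0])     # one raw example, to sanity-check the prompt/answer format
```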