Update app.py
app.py CHANGED

```diff
@@ -3,7 +3,7 @@ import json
 from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments, AutoTokenizer
 from datasets import Dataset
 import shutil
-import
+import torch
 
 # Load tokenized data
 def load_data(tokenized_file):
@@ -27,8 +27,10 @@ def fine_tune_model(tokenized_file, progress=gr.Progress()):
     training_args = TrainingArguments(
         output_dir='./results',
         num_train_epochs=3,
-        per_device_train_batch_size=
-        per_device_eval_batch_size=
+        per_device_train_batch_size=8,  # Reduce batch size
+        per_device_eval_batch_size=16,
+        gradient_accumulation_steps=2,  # Use gradient accumulation
+        fp16=True,  # Enable mixed precision training
         warmup_steps=500,
         weight_decay=0.01,
         logging_dir='./logs',
```
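The point of these changes is to cut peak GPU memory without shrinking the effective batch size: with per_device_train_batch_size=8 and gradient_accumulation_steps=2, the Trainer accumulates gradients over two micro-batches before each optimizer step, for an effective batch of 8 × 2 = 16 examples per device, while fp16=True runs forward/backward passes in half precision. A minimal sketch of the resulting configuration, assuming a CUDA-capable machine (the torch.cuda.is_available() guard is an illustrative addition for CPU portability, not part of the diff):

```python
import torch
from transformers import TrainingArguments

# Sketch of the updated training configuration from the diff above.
training_args = TrainingArguments(
    output_dir='./results',
    num_train_epochs=3,
    per_device_train_batch_size=8,    # smaller micro-batch to fit in memory
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=2,    # effective train batch: 8 * 2 = 16 per device
    fp16=torch.cuda.is_available(),   # assumption: mixed precision only when a GPU is present
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir='./logs',
)
```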