from transformers import (
    AutoTokenizer,
    DataCollatorWithPadding,
    AutoModelForSequenceClassification,
    TrainingArguments,
    Trainer,
)
from sklearn.metrics import (
    accuracy_score,
    precision_recall_fscore_support,
)
import datasets

# Toggle between full fine-tuning (all weights train) and head-only
# training (the BERT encoder is frozen further down; only the new
# classification head updates).
full_fine_tuning = False
# NOTE(review): 'train' and 'test' currently point at the SAME csv, so
# evaluation runs on the training data — confirm this is intentional
# (the commented-out line below suggests a proper train/dev split exists).
data_path = {'train': './data/reviews.csv', 'test': './data/reviews.csv'}
# data_path = {'train': './data/train.csv', 'test': './data/dev.csv'}
raw_datasets = datasets.load_dataset('csv', data_files=data_path, delimiter=',')
# Chinese BERT checkpoint; a fresh 2-label sequence-classification head
# is attached on top (its weights are randomly initialized).
model_name_for_path = 'bert-base-chinese'
tokenizer = AutoTokenizer.from_pretrained(model_name_for_path)
model = AutoModelForSequenceClassification.from_pretrained(model_name_for_path, num_labels=2)


def tokenize_function(examples):
    """Tokenize a batch's 'SentimentText' column.

    Pads every example to the model's max length and truncates longer
    ones, so all sequences come out fixed-width.
    """
    texts = examples['SentimentText']
    encoded = tokenizer(texts, padding="max_length", truncation=True)
    return encoded


# Tokenize every split in one batched pass; map() adds the tokenizer's
# output columns (input_ids, attention_mask, ...) next to the CSV columns.
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
# Dynamic-padding collator — redundant here since tokenize_function
# already pads to max_length (so batches are fixed-width), but harmless.
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
print(tokenized_datasets)

# Deterministically shuffled views of each split (fixed seed for reproducibility).
train_dataset = tokenized_datasets['train'].shuffle(seed=114514)
eval_dataset = tokenized_datasets['test'].shuffle(seed=114514)

def compute_metrics(pred):
    """Turn an EvalPrediction into accuracy / weighted P, R, F1.

    `pred.predictions` holds per-class logits; argmax over the last axis
    yields the predicted class ids, which are scored against
    `pred.label_ids`.
    """
    y_true = pred.label_ids
    y_pred = pred.predictions.argmax(-1)
    metrics = {'accuracy': accuracy_score(y_true, y_pred)}
    prec, rec, f1_weighted, _ = precision_recall_fscore_support(y_true, y_pred, average='weighted')
    metrics.update(f1=f1_weighted, precision=prec, recall=rec)
    return metrics

# Hyperparameters: 3 epochs at 5e-5 peak LR with 20% warmup; evaluate,
# log, and checkpoint once per epoch, streaming metrics to TensorBoard.
training_args = TrainingArguments(
    output_dir='./review_trainer',      # checkpoints are written here
    eval_strategy='epoch',              # run eval_dataset after every epoch
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    learning_rate=5e-5,
    num_train_epochs=3,
    warmup_ratio=0.2,                   # first 20% of steps ramp the LR up
    logging_dir='./review_train_logs',  # TensorBoard event files
    logging_strategy='epoch',
    save_strategy='epoch',
    report_to='tensorboard',
)
# Freeze the BERT encoder when only the classification head should train.
# Done before Trainer.train() builds the optimizer, so the frozen
# parameters never receive gradient updates.
if not full_fine_tuning:
    for param in model.bert.parameters():
        param.requires_grad = False

trainer = Trainer(
    model=model,
    args=training_args,
    # BUG FIX: the seed-shuffled datasets built above (train_dataset /
    # eval_dataset) were computed but never used — the Trainer was being
    # fed the raw, unshuffled splits instead.
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    data_collator=data_collator,
    processing_class=tokenizer,   # saved alongside the model by save_model()
    compute_metrics=compute_metrics,
)

trainer.train()
# Persist final weights (+ tokenizer) for later from_pretrained() reload.
trainer.save_model('./review_model')