
import torch
import datasets
import transformers
import pandas as pd
import numpy as np
from transformers import BigBirdTokenizer, \
BigBirdForSequenceClassification, Trainer, TrainingArguments, EvalPrediction, AutoTokenizer, BertTokenizer
from torch.utils.data import Dataset, DataLoader
from datasets import Dataset, DatasetDict
from sklearn.model_selection import train_test_split
from data_util.data_pro import get_data
import model.config as conf
# import wandb
import random
# path = "../dataset/oh/oh2.csv"
# df=pd.read_csv("../dataset/hyper/hy.csv")
# df = pd.read_csv("../dataset/hyper/hy.csv")

from sklearn.model_selection import train_test_split
# train_data, test_data = train_test_split(df, shuffle=True)
# train_data, test_data = train_test_split(df, test_size=0.5, shuffle = True)
# train_data = Dataset.from_pandas(train_data)
# test_data = Dataset.from_pandas(test_data)
# Load the train/validation splits, frame them as (text, label) pairs,
# and wrap each split as a HuggingFace Dataset.
raw_train = get_data('public_dataset/train_dataset.txt')
raw_valid = get_data('public_dataset/valid_dataset.txt')
train_data = Dataset.from_pandas(pd.DataFrame(raw_train, columns=['text', 'label']))
test_data = Dataset.from_pandas(pd.DataFrame(raw_valid, columns=['text', 'label']))
# Number of target classes, taken from the project config.
num_class = len(conf.cls)

# Local directory holding the pre-trained BigBird checkpoint (hoisted so the
# path is defined once instead of being repeated in both from_pretrained calls).
MODEL_PATH = "E:\\czy\\bert_attention\\bigbird"

# NOTE(review): the original passed `num_labels` and `max_length` to the
# tokenizer. `num_labels` is a model-config argument, not a tokenizer one, and
# the tokenizer-init kwarg for capping sequence length is `model_max_length`;
# the stray kwargs were ignored at runtime (tokenization() passes max_length
# explicitly), so dropping them does not change behavior.
tokenizer = BertTokenizer.from_pretrained(MODEL_PATH, model_max_length=1024)
model = BigBirdForSequenceClassification.from_pretrained(MODEL_PATH, num_labels=num_class)
# define a function that will tokenize the model, and will return the relevant inputs for the model
def tokenization(batched_text):
    """Tokenize a batch of examples for the model.

    Pads every sequence to exactly 1024 tokens and truncates anything longer,
    returning the tokenizer's encoding dict (input_ids, attention_mask, ...).
    """
    texts = batched_text['text']
    return tokenizer(
        texts,
        padding='max_length',
        truncation=True,
        max_length=1024,
    )
# Tokenize each split with a single map() call (batch_size == dataset length,
# so the whole split goes through the tokenizer in one batch).
train_data = train_data.map(tokenization, batched = True, batch_size=len(train_data))
test_data = test_data.map(tokenization, batched=True, batch_size=len(test_data))
# Expose only the columns the model consumes, converted to torch tensors.
train_data.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])
test_data.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])
# NOTE(review): `device` is recomputed identically right before trainer.train()
# below, and Trainer manages device placement itself — this value is only
# informational.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# define accuracy metrics
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
def compute_metrics(pred):
    """Compute evaluation metrics from a Trainer EvalPrediction.

    Takes the argmax over the class-logit axis to get hard predictions, then
    returns accuracy plus macro-averaged precision, recall and F1.
    """
    y_true = pred.label_ids
    y_pred = pred.predictions.argmax(-1)
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average='macro'
    )
    return {
        'accuracy': accuracy_score(y_true, y_pred),
        'f1': f1,
        'precision': precision,
        'recall': recall,
    }


# define the training arguments
# define the training arguments
# Effective per-device train batch size: 2 * 32 gradient-accumulation steps = 64.
training_args = TrainingArguments(
    output_dir="checkpoint",          # where checkpoints are written
    num_train_epochs=3,
    per_device_train_batch_size=2,    # small micro-batch; BigBird @ 1024 tokens is memory-heavy
    gradient_accumulation_steps=32,
    per_device_eval_batch_size=32,
    weight_decay=0.01,
    logging_steps=5,
    learning_rate=3e-5,
    evaluation_strategy='epoch',      # evaluate at the end of every epoch
    save_strategy='epoch',            # must match evaluation_strategy when load_best_model_at_end=True
    disable_tqdm=False,
    load_best_model_at_end=True,      # restore the best checkpoint (by default, lowest eval loss) after training
    warmup_steps=200,
    fp16=False,
    logging_dir='./dataset',          # NOTE(review): logs go into the dataset dir — confirm this is intended
    dataloader_num_workers=0,
    run_name='bigbird_classification_1e5'  # NOTE(review): name says 1e5 but learning_rate is 3e-5
)

# instantiate the trainer class and check for available devices
# Instantiate the Trainer with the tokenized splits and the metric callback.
trainer = Trainer(
    model=model,
    args=training_args,
    compute_metrics=compute_metrics,
    train_dataset=train_data,
    eval_dataset=test_data
)

# `device` was already computed earlier in the file; the duplicate
# `device = 'cuda' if torch.cuda.is_available() else 'cpu'` recomputation that
# used to sit here has been removed. Trainer handles device placement
# internally, so this print is informational logging only.
print(device)

# Fine-tune, then run a final evaluation pass on the validation split.
trainer.train()
trainer.evaluate()