import os
import torch
import numpy as np
from datasets import load_dataset
import nlp.arrow_dataset
from transformers import BertModel, BertTokenizer
from transformers import BertForSequenceClassification, BertTokenizerFast, Trainer, TrainingArguments

# Restrict the process to the first GPU; fall back to CPU when CUDA is absent.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Local HuggingFace model cache.
# FIX: the original used r"C:\\Users\\..." — in a raw string the doubled
# backslashes are kept literally, producing double path separators. A raw
# string with single backslashes is the intended Windows path.
cache_path = r"C:\Users\jliud\.cache\huggingface\hub"

# Tokenizer matching the bert-base-uncased checkpoint loaded further below.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', cache_dir=cache_path)


# Batch preprocessing function handed to Dataset.map.
def preprocess(data):
    """Tokenize the 'text' field of a batch of examples for BERT.

    Sequences are truncated to 512 tokens; padding=True pads every
    example to the longest sequence in the batch passed in.
    """
    return tokenizer(data['text'], padding=True, truncation=True, max_length=512)


# Load the CSV corpus and split it 70/30 into train and test sets.
# FIX: pass a fixed seed so the split is reproducible — the original call
# produced a different random split on every run, making results
# incomparable across executions.
dataset = load_dataset('csv', data_files='data/imdbs.csv', split='train')
dataset = dataset.train_test_split(test_size=0.3, seed=42)

train_set = dataset['train']
test_set = dataset['test']


# Classification model built on bert-base-uncased, moved to the chosen device.
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', cache_dir=cache_path)
model = model.to(device)


# Tokenize each split in a single map batch covering the whole split.
# NOTE(review): batch_size=len(...) means padding=True pads every example to
# the longest sequence in the entire split, so all rows end up the same
# length and can be stacked without a padding collator. Memory-heavy but
# presumably intentional — confirm before switching to per-batch padding.
train_set = train_set.map(preprocess, batched=True, batch_size=len(train_set))
test_set = test_set.map(preprocess, batched=True, batch_size=len(test_set))

# Expose only the columns the model consumes, formatted as torch tensors.
train_set.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])
test_set.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])

# Training hyper-parameters: batch size, epoch count, warmup and weight decay.
batch_size = 1
epochs = 2

warmup_steps = 500
weight_decay = 0.01

# Assemble the Trainer configuration from the values above, then build it.
_training_kwargs = dict(
    output_dir='./results',
    num_train_epochs=epochs,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    warmup_steps=warmup_steps,
    weight_decay=weight_decay,
    logging_dir='./logs',
    # Explicit optimizer choice silences the default-optimizer warning.
    optim="adamw_torch",
)
training_args = TrainingArguments(**_training_kwargs)

# Wire the model, arguments, and both splits into a Trainer, then run
# fine-tuning followed by one evaluation pass over the test split.
trainer = Trainer(model=model, args=training_args,
                  train_dataset=train_set, eval_dataset=test_set)

trainer.train()

print(tokenizer)

# Evaluate on the eval_dataset configured above.
eval_metrics = trainer.evaluate()