# Text classification: fine-tune bert-base-chinese for emotion recognition.
# Reference: https://huggingface.co/docs/transformers/v4.32.1/en/tasks/sequence_classification

from datasets import load_dataset
from transformers import BertTokenizer,AutoTokenizer,TrainingArguments,DataCollatorForLanguageModeling,AutoModelForCausalLM,AutoModelForSequenceClassification
from transformers import Trainer
from transformers import DataCollatorWithPadding

import evaluate
import numpy as np


#### Tokenization setup ####

# Merge the three emotion-recognition corpora (all JSON) into one train split.
_train_files = [
    '/workspace/datasets/emotion-recognition/weibo-emotion-4000.json',
    '/workspace/datasets/emotion-recognition/usual-train-class-6-27768.json',
    '/workspace/datasets/emotion-recognition/emotion-class-6-40133.json',
]
raw_datasets = load_dataset('json', data_files={'train': _train_files})
print(raw_datasets)

# Hold out 10% of the merged data as the evaluation split.
datasets = raw_datasets["train"]
datasets = datasets.train_test_split(test_size=0.1)
print(datasets)

# Tokenizer from a local snapshot of bert-base-chinese (offline load).
tokenizer = AutoTokenizer.from_pretrained("/large-storage/model/bert-base-chinese")

def preprocess_function(examples):
    """Tokenize a batch of raw texts, truncating to the model's max length.

    No padding is applied here — the data collator pads dynamically per batch.
    """
    texts = examples["text"]
    return tokenizer(texts, truncation=True)

# Batch-tokenize both splits; padding is deferred to the collator below.
tokenized_dataset = datasets.map(preprocess_function, batched=True)
print(tokenized_dataset)

# Dynamic padding: each batch is padded only to its own longest sequence,
# which is cheaper than padding the whole dataset to a global max length.
# (This is DataCollatorWithPadding for classification — not
# DataCollatorForLanguageModeling, which is for LM objectives.)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer, padding=True)





### Training setup ###

# 8-way emotion label space shared by the merged datasets.
id2label = {
    0: 'neutral', 1: 'sadness', 2: 'like', 3: 'anger',
    4: 'disgust', 5: 'fear', 6: 'happiness', 7: 'surprise',
}
# Inverse mapping, derived from id2label so the two always stay in sync.
label2id = {name: idx for idx, name in id2label.items()}

training_args = TrainingArguments(
    output_dir="./output/emotion-recognition/bert-base-chinese/",
    evaluation_strategy="epoch",
    save_strategy="epoch",
    learning_rate=2e-5,
    weight_decay=0.01,
    num_train_epochs=2,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
)


# Load the local bert-base-chinese checkpoint with a fresh 8-way
# sequence-classification head; the id/label maps go into the model config
# so predictions decode directly to emotion names.
model = AutoModelForSequenceClassification.from_pretrained("/large-storage/model/bert-base-chinese",num_labels=8, id2label=id2label, label2id=label2id)


### Evaluation ###

# Load the accuracy metric from a local metric script
# (path-based load — presumably for an offline environment).
accuracy = evaluate.load("accuracy/accuracy.py")

def compute_metrics(eval_pred):
    """Turn eval logits into class ids and report accuracy."""
    logits, labels = eval_pred
    predicted_ids = np.argmax(logits, axis=1)
    return accuracy.compute(predictions=predicted_ids, references=labels)


# Wire model, data, collator and metric into the Trainer, then fine-tune.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset["train"],
    eval_dataset=tokenized_dataset["test"],
    data_collator=data_collator,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)

trainer.train()

 