#############################################################################
# 本脚本展示使用HuggingFace的Bert模型进行文本分类, 代码参考如下链接:
# https://percent4.github.io/archives/
#
# 
#                                                       Author： Xian Yang
#                                                     Date Time： 2023.11.27
##############################################################################

import torch
import datasets
import numpy    as np

# 导入transformers库中的自动tokenizer工具，含padding功能的dataloader工具
from transformers    import AutoTokenizer, DataCollatorWithPadding
from transformers    import AutoModelForSequenceClassification
from transformers    import Trainer, TrainingArguments
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

# Pretrained checkpoint name: the uncased BERT base model from the HuggingFace hub.
checkpoint   = "bert-base-uncased"
# Use the first GPU when available, otherwise fall back to CPU.
device       = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load the tokenizer matching the pretrained checkpoint above.
# NOTE(review): an earlier comment said "bert-base-cased", but the checkpoint
# actually used throughout this script is "bert-base-uncased".
tokenizer    = AutoTokenizer.from_pretrained(checkpoint)

# Load the IMDB dataset (English movie reviews, a standard text-classification benchmark).
# Downloaded datasets are cached by default under:
#   ~/.cache/huggingface/datasets
#   ~/.cache/huggingface/modules/datasets_modules
raw_datasets = datasets.load_dataset('imdb')     

# DatasetDict({
#     train: Dataset({
#         features: ['text', 'label'],
#         num_rows: 25000
#     })
#     test: Dataset({
#         features: ['text', 'label'],
#         num_rows: 25000
#     })
#     unsupervised: Dataset({
#         features: ['text', 'label'],
#         num_rows: 50000
#     })
# })
# print(raw_datasets)

def tokenize_function(sample):
    """Tokenize the 'text' field of a dataset batch, truncating each example to 300 tokens.

    Intended for use with `datasets.Dataset.map(..., batched=True)`; padding is
    deferred to the data collator so batches are padded dynamically.
    """
    texts = sample['text']
    return tokenizer(texts, truncation=True, max_length=300)

# 对数据集进行批量tokenize操作
# tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)

# DatasetDict({
#     train: Dataset({
#         features: ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
#         num_rows: 25000
#     })
#     test: Dataset({
#         features: ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
#         num_rows: 25000
#     })
#     unsupervised: Dataset({
#         features: ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
#         num_rows: 50000
#     })
# })
# 其中：
#   - input_ids:       token转换后在词表中的索引
#   - token_type_ids:  段落（segment）编号，用于区分句子对任务中的两个句子：第一个句子的所有token
#                      均为0，第二个句子的所有token均为1；单句任务（如本例）中全部为0. 同一句子内
#                      所有token的token_type_ids相同.
#   - attention_mask:  标记哪些token是真实输入（1）、哪些是padding（0）；padding位置用[PAD]填充，
#                      不参与attention的计算.
# print(tokenized_datasets)

# 将数据按batch打成dataloader
# data_collator      = DataCollatorWithPadding(tokenizer=tokenizer)

# 官方模型下载后默认的存储路径: Users/yangxianpku/.cache/huggingface/hub
# model              = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2).to(device)


# def compute_metrices(pred):
#     labels = pred.label_ids
#     preds  = pred.predictions.argmax(-1)
#     precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')
#     acc                      = accuracy_score(labels, preds)
#     return { 'accuracy': acc, 'f1': f1, 'precision': precision, 'recall': recall }

# training_args = TrainingArguments(output_dir='../outputs/imdb_trainer',      # 指定输出文件夹，没有会自动创建
#                                     evaluation_strategy="epoch",
#                                     per_device_train_batch_size=32,
#                                     per_device_eval_batch_size=32,
#                                     learning_rate=5e-5,
#                                     num_train_epochs=3,
#                                     warmup_ratio=0.2,
#                                     logging_dir='../outputs/imdb_train_logs',
#                                     logging_strategy="epoch",
#                                     save_strategy="epoch",
#                                     report_to="none"
#                                 ) 

# trainer = Trainer(model,
#                     training_args,
#                     train_dataset=tokenized_datasets["train"],
#                     eval_dataset=tokenized_datasets["test"],
#                     # # 在定义了tokenizer之后，其实这里的data_collator就不用再写了，会自动根据tokenizer创建
#                     data_collator=data_collator,  
#                     tokenizer=tokenizer,
#                     compute_metrics=compute_metrices
#                 )

# trainer.train()

# 训练结果
# {'loss': 0.3298, 'learning_rate': 4.168443496801706e-05, 'epoch': 1.0}                                                                             
# {'eval_loss': 0.417169988155365, 'eval_accuracy': 0.8484, 'eval_f1': 0.8458619595736475, 
#   'eval_precision': 0.8729650001901906, 'eval_recall': 0.8484, 'eval_runtime': 55.8942, 
#   'eval_samples_per_second': 447.274, 'eval_steps_per_second': 13.991, 'epoch': 1.0}    
#                                  
# {'loss': 0.1666, 'learning_rate': 2.084221748400853e-05, 'epoch': 2.0}                                                                                                            
# {'eval_loss': 0.19941680133342743, 'eval_accuracy': 0.92404, 'eval_f1': 0.924011164486192, 
#   'eval_precision': 0.9246846209719762, 'eval_recall': 0.92404, 'eval_runtime': 56.0208, 
#   'eval_samples_per_second': 446.263, 'eval_steps_per_second': 13.959, 'epoch': 2.0}    
#                                                                                             
# {'loss': 0.0642, 'learning_rate': 0.0, 'epoch': 3.0}                                                                                                                              
# {'eval_loss': 0.30445125699043274, 'eval_accuracy': 0.92564, 'eval_f1': 0.925638637818823, 
#   'eval_precision': 0.9256711904605412, 'eval_recall': 0.92564, 'eval_runtime': 56.029, 
#   'eval_samples_per_second': 446.198, 'eval_steps_per_second': 13.957, 'epoch': 3.0}      
# 
#                                                                                                                  
# {'train_runtime': 648.8091, 'train_samples_per_second': 115.596, 'train_steps_per_second': 3.616,
#    'train_loss': 0.18684190455808258, 'epoch': 3.0} 
