import torch.nn as nn
from transformers import BertForSequenceClassification, Trainer, TrainingArguments

from metric import compute_metrics
from tokenProcess import train_dataset, val_dataset

# Hyper-parameters for the HuggingFace Trainer, gathered in one dict so the
# whole configuration can be scanned and tweaked in a single place.
_training_config = dict(
    output_dir='./results',          # where checkpoints are written
    num_train_epochs=10,             # total number of training epochs
    per_device_train_batch_size=16,  # batch size per device during training
    per_device_eval_batch_size=16,   # batch size for evaluation
                                     # (the default of 64 hit CUDA out-of-memory; dialed back to 16)
    warmup_steps=500,                # linear warm-up steps for the learning-rate scheduler
    learning_rate=3e-5,
    weight_decay=0.01,               # strength of weight decay
    logging_dir='./logs',            # directory for storing logs
    logging_steps=10,
    # evaluation_strategy="epoch"    # disabled: after initial experiments we only need the
                                     # final post-training evaluation, not one per epoch
)

training_args = TrainingArguments(**_training_config)

# https://blog.csdn.net/qq_34914551/article/details/87699317
# Notes on learning-rate decay: per-parameter-group learning rates can be set like this:
# optimizer = optim.Adam([{'params':model.conv1.parameters(),'lr':0.2},
#                         {'params':model.conv2.parameters(),'lr':0.2},
#                         {'params':prelu_params,'lr':0.02},
#                         {'params':rest_params,'lr':0.3}
#                         ])
# AdamW (PyTorch) works the same way.

# num_labels = 2
# # Build on the pretrained model's config plus a num_labels entry — e.g. for binary
# # sentence classification num_labels=2 — then pass the config in:
# config = BertConfig.from_pretrained("./bert-base-chinese", num_labels=num_labels)
#
# # Alternatively, declare num_labels directly as a from_pretrained argument:
# https://huggingface.co/transformers/training.html
# from transformers import AutoModelForSequenceClassification
# model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)

# 22-way sequence-classification head on top of a local Chinese BERT checkpoint.
model = BertForSequenceClassification.from_pretrained("./bert-base-chinese", num_labels=22)

# The idea is not to patch the transformers library itself: initialize the model
# the normal way, then fetch the submodule and overwrite selected weights in place.
# Approach follows https://github.com/asappresearch/revisit-bert-finetuning
# Plain attribute access; getattr(model, 'bert') with a constant literal was redundant.
encoder_temp = model.bert

reinit_pooler = True  # re-initialize the pooler layer
reinit_layers = 0     # number of top encoder layers to re-initialize (0 = none)

# Re-initialize the pooler with the same scheme BERT uses at pre-training time.
if reinit_pooler:
    # initializer_range is 0.02 for bert-base (see encoder_temp.config)
    encoder_temp.pooler.dense.weight.data.normal_(mean=0.0, std=encoder_temp.config.initializer_range)
    encoder_temp.pooler.dense.bias.data.zero_()
    for p in encoder_temp.pooler.parameters():
        p.requires_grad = True  # ensure the freshly reset weights stay trainable

# Re-initialize the top `reinit_layers` transformer layers, mirroring
# BertPreTrainedModel._init_weights.
# NOTE(review): the original gate was `reinit_pooler and reinit_layers > 0`,
# which silently skipped layer re-init whenever the pooler flag was off; the
# two options are independent in the reference implementation
# (asappresearch/revisit-bert-finetuning), so only reinit_layers is checked here.
# With the current flag values (reinit_pooler=True, reinit_layers=0) behavior
# is unchanged.
if reinit_layers > 0:
    for layer in encoder_temp.encoder.layer[-reinit_layers:]:
        for module in layer.modules():
            # Initialize the weights the same way BERT pre-training does.
            if isinstance(module, nn.Linear):
                # Slightly different from the TF version, which uses truncated_normal
                # (cf https://github.com/pytorch/pytorch/pull/5617); use the model's
                # own initializer_range rather than a hard-coded std.
                module.weight.data.normal_(mean=0.0, std=encoder_temp.config.initializer_range)
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.Embedding):
                module.weight.data.normal_(mean=0.0, std=encoder_temp.config.initializer_range)
                if module.padding_idx is not None:
                    # keep the padding embedding at exactly zero
                    module.weight.data[module.padding_idx].zero_()
            elif isinstance(module, nn.LayerNorm):
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)

# print(model)
# f = open("out_no_reinit.txt", "w")
# for name, param in model.named_parameters():
#     if param.requires_grad:
#         print("-----model.named_parameters()--{}:{}".format(name, param), file=f)
# f.close()

# Wire everything (including compute_metrics) into a Trainer and run fine-tuning.
trainer = Trainer(
    model=model,                  # the instantiated 🤗 Transformers model to be trained
    args=training_args,           # training arguments, defined above
    train_dataset=train_dataset,  # training split
    eval_dataset=val_dataset,     # validation split
    compute_metrics=compute_metrics
)

trainer.train()

# Explicitly run a final evaluation pass; evaluate() returns the metrics dict.
eval_metrics = trainer.evaluate()
print(eval_metrics)

# Setting evaluation_strategy="epoch" in TrainingArguments would instead
# validate accuracy after every epoch, showing how it evolves as training
# proceeds; the next step is to compare different experimental settings that way.

trainer.save_model('./fine-tune-event')

# # a list of re pattern of tensor names to ignore from the model when loading the model weights
# # (and avoid unnecessary warnings).
# _keys_to_ignore_on_load_missing = None
# Found this while searching for how to re-initialize BERT layers, but it is not that:
# it merely suppresses warnings about weights that were not loaded.
