from typing import Optional

import paddle
from paddle import Tensor
from paddle.fluid.reader import DataLoader
from paddlenlp.transformers import BertForMaskedLM
from paddlenlp.transformers import CosineDecayWithWarmup
import os

from tqdm import tqdm
from BertPrompt import train, logging, save

from PromptDataSet import PromptDataSet, collate_fn

# Root directory holding locally downloaded pretrained model weights
# (e.g. <cwd>/model_hub/bert-base-chinese).
model_dir = os.path.join(os.getcwd(), 'model_hub')

class PromptModel(paddle.nn.Layer):
    """BERT masked-LM wrapper that softmax-normalizes the vocabulary logits.

    With `labels` supplied the underlying model also returns the MLM loss,
    which is passed through unchanged alongside the normalized logits.
    """

    def __init__(self, path: str = os.path.join(model_dir, 'bert-base-chinese')):
        super().__init__()
        # Pretrained Chinese BERT with its masked-LM head.
        self.model = BertForMaskedLM.from_pretrained(path)
        self.softmax = paddle.nn.Softmax()

    def forward(self, input_ids, token_type_ids, attention_mask, labels: Optional[Tensor] = None):
        outputs = self.model(input_ids=input_ids,
                             token_type_ids=token_type_ids,
                             attention_mask=attention_mask,
                             labels=labels)
        if labels is None:
            # Inference path: the model returns logits only.
            return self.softmax(outputs)
        # Training/eval path: the model returns (loss, logits).
        loss, raw_logits = outputs
        return loss, self.softmax(raw_logits)


@paddle.no_grad()
def dev(model: PromptModel, dev_data_loader: DataLoader, mask_positions=(5, 6)):
    """Evaluate the prompt model on the dev set.

    A sample counts as correct only when the prediction at EVERY masked
    position matches the label (all-or-nothing scoring per sample).

    Args:
        model: model to evaluate; switched to eval mode here.
        dev_data_loader: yields dicts with 'input_ids', 'token_type_ids',
            'label', 'label_model' and 'attention' entries.
        mask_positions: sequence positions of the [MASK] tokens to score.
            Defaults to (5, 6), matching the original hard-coded prompt
            template — presumably fixed by PromptDataSet; confirm there.

    Returns:
        (accuracy, loss): accuracy in [0, 1] over all samples, and the
        SUM of per-batch losses (not averaged, matching prior behavior).
    """
    total = 0
    correct = 0
    loss_sum = 0.0
    model.eval()
    num_masks = len(mask_positions)
    # Loop-invariant: build the index tensor once instead of per batch.
    mask_index = paddle.to_tensor(list(mask_positions))
    for item in tqdm(dev_data_loader):
        input_ids = item['input_ids']
        batch_size = len(input_ids)
        total += batch_size
        token_type_ids = item['token_type_ids']
        label = item['label']
        label_model = item['label_model']
        attention_mask = item['attention']
        dev_loss, logits = model(input_ids=input_ids, token_type_ids=token_type_ids,
                                 attention_mask=attention_mask,
                                 labels=label_model)  # logits: [batch, seq_len, vocab]
        loss_sum += dev_loss.item()
        # Keep only the logits at the masked positions: [batch, num_masks, vocab].
        masked_logits = paddle.index_select(logits, index=mask_index, axis=1)
        predictions = paddle.argmax(masked_logits, axis=2)  # [batch, num_masks]
        # Per-sample count of positions predicted correctly.
        hits = (predictions == label).sum(axis=1)
        # Samples where hits != num_masks are wrong; count the fully-correct ones.
        correct += batch_size - paddle.count_nonzero(hits - num_masks).item()

    if total == 0:
        # Empty loader: avoid ZeroDivisionError.
        return 0.0, loss_sum
    return correct / total, loss_sum


if __name__ == '__main__':
    model = PromptModel()

    train_set = PromptDataSet("train")
    dev_set = PromptDataSet("dev")
    # NOTE(review): shuffle=False on the TRAINING loader looks unintentional
    # (training data is normally shuffled each epoch) — confirm before changing.
    train_data_loader = DataLoader(train_set, batch_size=256, shuffle=False, collate_fn=collate_fn)
    dev_data_loader = DataLoader(dev_set, batch_size=256, shuffle=False, collate_fn=collate_fn)

    learning_rate = 4e-5
    # Number of training epochs.
    epochs = 50
    # Warmup proportion: the LR rises linearly over the first 10% of all
    # steps, peaks, then decays (cosine) back to 0.
    warmup_proportion = 0.1
    # Weight-decay coefficient (regularization against overfitting); 0 disables it.
    weight_decay = 0.0

    # Total number of optimizer steps over the whole run.
    num_training_steps = len(train_data_loader) * epochs
    # Linear warmup to `learning_rate` over warmup_proportion * num_training_steps
    # steps, followed by cosine decay to 0.
    lr_scheduler = CosineDecayWithWarmup(learning_rate, num_training_steps, warmup_proportion)

    # Parameters that receive weight decay: everything except biases and
    # normalization weights. Built ONCE as a set — the original rebuilt this
    # list inside the lambda, i.e. on every single parameter lookup per step.
    decay_param_names = {
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ["bias", "norm"])
    }

    # AdamW optimizer with selective weight decay.
    optimizer = paddle.optimizer.AdamW(
        learning_rate=lr_scheduler,
        parameters=model.parameters(),
        weight_decay=weight_decay,
        apply_decay_param_fun=lambda x: x in decay_param_names)

    best = 0
    final_checkpoint = {}
    for epoch in range(epochs):
        logging.info(f"开始准备第{epoch + 1}次训练")
        loss_total = train(model=model, dataLoader=train_data_loader, optimizer=optimizer, lr_scheduler=lr_scheduler)
        logging.info(f"第{epoch + 1}次训练的损失为{loss_total}")
        accu, loss = dev(model, dev_data_loader)
        logging.info(f"第{epoch + 1}次测试损失为{loss}，准确度为{accu * 100}%")
        final_checkpoint["epoch"] = epoch + 1
        final_checkpoint["loss"] = loss_total
        # Persist only when dev accuracy improves on the best seen so far.
        if accu > best:
            logging.info("开始保存")
            save(model, optimizer, checkpoint=final_checkpoint)
            best = accu
            logging.info("保存完毕")