#!/usr/bin/env python  
# -*- coding: utf-8 -*-
""" 
@author:hello_life 
@license: Apache Licence 
@file: train_utils.py 
@time: 2022/05/05
@software: PyCharm 
description:
"""
import os
import logging
from tqdm import tqdm

import torch
import torch.nn as nn
from transformers import AdamW,BertTokenizer
from transformers import get_scheduler
from accelerate import Accelerator

from utils.metric_utils import get_f1_score,get_f1_score_crf
from utils.data_utils import id_to_label,crf_decode


# Module-level logging setup: INFO and above go to ./logs/logs
# (NOTE(review): the directory "./logs" must already exist, or basicConfig
# raises FileNotFoundError at import time — confirm it is created elsewhere).
logging.basicConfig(
    level=logging.INFO,
    filename="./logs/logs",
    # stream=sys.stdout,
    format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)


def train_loop_crf(model, train_data_loader, test_data_loader, config):
    """
    Training loop for the BERT+CRF model.

    Builds an AdamW optimizer with weight-decay exclusions, a linear LR
    schedule, wraps everything with ``accelerate``, then trains for
    ``config.epoches`` epochs, checkpointing and logging the train F1 every
    ``config.steps`` steps and evaluating on the test set after each epoch.

    :param model: model exposing ``forward_with_loss(token_ids, masks, y, label_len)``
    :param train_data_loader: yields (token_ids, masks, y, label_len) batches
    :param test_data_loader: evaluation batches with the same structure
    :param config: needs ``lr``, ``epoches``, ``steps``, ``save_dir``,
                   ``save_model_path``
    :return: None
    """
    # 1. Optimizer: standard transformer recipe — no weight decay on biases
    #    and LayerNorm weights.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters()
                       if not any(nd in n for nd in no_decay)],
            "weight_decay": 0.01,
        },
        {
            "params": [p for n, p in model.named_parameters()
                       if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=config.lr)

    # 2. Linear learning-rate decay over the full training run.
    num_training_steps = config.epoches * len(train_data_loader)
    lr_scheduler = get_scheduler(
        "linear",
        optimizer=optimizer,
        num_warmup_steps=0,
        num_training_steps=num_training_steps,
    )

    # 3. Let accelerate handle device placement / distributed wrapping.
    accelerator = Accelerator()
    train_data_loader, test_data_loader, model, optimizer = accelerator.prepare(
        train_data_loader, test_data_loader, model, optimizer
    )

    for epoch in range(config.epoches):
        model.train()
        # BUGFIX: iterate the *prepared* dataloader — the original looped over
        # the raw one, so batches never went through accelerate's device
        # placement. Also enumerate(tqdm(...)) so the bar knows its total.
        for step, (token_ids, masks, y, label_len) in enumerate(tqdm(train_data_loader)):
            outputs, loss = model.forward_with_loss(token_ids, masks, y, label_len)

            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

            # Periodic checkpoint + train-set F1.
            if step % config.steps == 0:
                # BUGFIX: makedirs(exist_ok=True) — os.mkdir fails on nested
                # paths and races when the directory appears concurrently.
                os.makedirs(config.save_dir, exist_ok=True)
                # BUGFIX: unwrap before saving so the checkpoint has clean
                # (non-distributed-wrapped) parameter names.
                torch.save(accelerator.unwrap_model(model).state_dict(),
                           config.save_model_path)
                logging.info(f"checkpoint has been saved in {config.save_model_path}")

                y = y.cpu().tolist()
                preds = crf_decode(model, outputs, masks, label_len, config)
                f1_score_total = get_f1_score_crf(y, preds, label_len, config)
                logging.info(f"Train:loss:{loss.item()},\n f1_score_total:{f1_score_total}")
        test_loop_crf(model, test_data_loader, config)


def test_loop_crf(model, test_dataloader, config):
    """
    Evaluate the model on the test set; log average loss and F1.

    :param model: model exposing ``forward_with_loss(token_ids, masks, y, label_len)``
    :param test_dataloader: yields (token_ids, masks, y, label_len) batches
    :param config: forwarded to ``crf_decode`` / ``get_f1_score_crf``
    :return: None (results are written to the log)
    """
    model.eval()
    preds_list, label_list, label_len_list = [], [], []
    total_loss = 0.0
    num_batches = 0
    # No gradients needed during evaluation — saves memory and time.
    with torch.no_grad():
        for token_ids, masks, y, label_len in test_dataloader:
            outputs, loss = model.forward_with_loss(token_ids, masks, y, label_len)

            y = y.cpu().tolist()
            preds = crf_decode(model, outputs, masks, label_len, config)

            preds_list.extend(preds)
            label_list.extend(y)
            label_len_list.extend(label_len)
            total_loss += loss.item()
            num_batches += 1

    # BUGFIX: the original divided by the last enumerate index (total_loss/i),
    # which under-counts by one batch, raises ZeroDivisionError with a single
    # batch, and NameError on an empty loader.
    avg_loss = total_loss / max(num_batches, 1)
    f1_score_total = get_f1_score_crf(label_list, preds_list, label_len_list, config)
    logging.info(f"Test:loss:{round(avg_loss,2)},\n f1:{f1_score_total}")


def predict_loop_crf(model, content, config, save_model_path):
    """
    Predict entity tags for a single sentence with the CRF head.

    :param model: model with a ``crf`` attribute exposing ``decode``
    :param content: single sentence (str) to tag
    :param config: needs ``pretrained_model_path``; forwarded to ``id_to_label``
    :param save_model_path: path of the checkpoint to load
    :return: list of label strings for the sentence tokens
    """
    # Tokenize the raw sentence into model inputs.
    tokenizer = BertTokenizer.from_pretrained(config.pretrained_model_path)
    inputs = tokenizer(content, return_tensors="pt")

    # BUGFIX: map_location lets a GPU-saved checkpoint load on a CPU-only
    # machine; load_state_dict then copies values onto the model's own device.
    model.load_state_dict(torch.load(save_model_path, map_location="cpu"))
    # BUGFIX: switch to eval mode so dropout/batch-norm don't perturb inference.
    model.eval()

    with torch.no_grad():
        outputs = model.forward(inputs)
        mask = inputs["attention_mask"].gt(0)
        # Strip the [CLS]/[SEP] positions before CRF decoding.
        predict = model.crf.decode(outputs[:, 1:-1, :], mask[:, 1:-1])
    tags = id_to_label(predict[0], config)

    return tags


