import time
import gc
import warnings
import os
from tqdm import tqdm
import torch
from torch import multiprocessing

from loader import loader4_baidu_senti
from nezha.util import nezha_torch_tool
from fish_tool import sys_tool, logs
from fish_tool.ai import torch_tool
from nezha.modeling.nezha_interpret import NeZhaForSequenceClassification

# Use the file-system sharing strategy so DataLoader workers passing tensors
# between processes don't exhaust file descriptors ("too many open files").
multiprocessing.set_sharing_strategy('file_system')
# Silence all warnings to keep the tqdm progress bars readable during training.
warnings.filterwarnings('ignore')


def evaluation(model, val_dataloader, is_test=False):
    """Compute classification accuracy of `model` over `val_dataloader`.

    Args:
        model: classifier invoked as ``model(**batch)``; assumed to return a
            (batch, num_classes) score tensor — TODO confirm against the model API.
        val_dataloader: iterable of batch dicts, each containing a 'labels' tensor.
        is_test: when True, stop after the first batch (quick smoke test).

    Returns:
        Accuracy as computed by ``nezha_torch_tool.accuracy_score``.
    """
    model.eval()
    preds, labels = [], []
    val_iterator = tqdm(val_dataloader, desc='Evaluation', total=len(val_dataloader))
    with torch.no_grad():
        for batch in val_iterator:
            prob = model(**batch)
            # tolist() already yields a plain Python list — the original wrapped
            # it in a redundant identity comprehension.
            preds.extend(torch.argmax(prob, 1).cpu().numpy().tolist())
            labels.extend(batch['labels'].cpu().numpy().tolist())
            if is_test:
                break
    acc = nezha_torch_tool.accuracy_score(y_true=labels, y_pred=preds)
    return acc


def get_valid_score(c, model, dtype, test_num=None):
    """Measure accuracy of `model` on the `dtype` split.

    Args:
        c: config object forwarded to the data loader.
        model: model whose output dict contains 'logits' (classification) and,
            for the rationale task, 'interpret_logit'.
        dtype: split name; 'inter' scores rationale predictions against
            ``batch['rationale']``, any other value scores 'logits' against
            ``batch['labels']``.
        test_num: optional cap on the number of examples (passed to the loader).

    Returns:
        float accuracy in [0, 1]; 0.0 when the split yields no elements.
    """
    model.eval()
    data = loader4_baidu_senti.get_data(c, dtype, test_num=test_num)
    tqdm_data = tqdm(data, desc=f'score {dtype}', leave=False)
    # Select the output tensor / gold key once, instead of duplicating the
    # counting logic in two per-batch branches as before.
    if dtype == 'inter':
        out_key, gold_key = 'interpret_logit', 'rationale'
    else:
        out_key, gold_key = 'logits', 'labels'
    total, right = 0, 0
    for batch in tqdm_data:
        # get_safe_args filters the batch down to the kwargs forward() accepts.
        args = sys_tool.get_safe_args(model.forward, batch)
        with torch.no_grad():
            out = model(**args)
        pred = out[out_key].argmax(dim=-1)
        gold = batch[gold_key]
        total += gold.numel()
        right += (pred == gold).sum().item()
    # Guard against an empty split (original raised ZeroDivisionError).
    return right / total if total else 0.0


def train_one_epoch(model, data, optimizer, scheduler, epoch):
    """Run one optimization pass over `data`.

    Args:
        model: module called as ``model(**batch)``; must return a dict with a
            'loss' tensor.
        data: iterable of batch dicts; expected to be a tqdm wrapper, since its
            ``set_description`` is used for progress reporting.
        optimizer: optimizer stepped once per batch.
        scheduler: LR scheduler stepped once per batch.
        epoch: epoch number, shown in the progress description.
    """
    model.train()
    last_refresh = 0  # zero forces a description update on the first batch
    for batch in data:
        output = model(**batch)
        batch_loss = output['loss']
        batch_loss.backward()
        # Clip gradients to unit norm before stepping to stabilise training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()

        # Refresh the progress-bar text at most every 2 seconds.
        current = time.time()
        if current - last_refresh > 2:
            last_refresh = current
            data.set_description(f'Training epoch={epoch}  loss={batch_loss.item():.4f}')


def train(args, test_num=0, repeat=1):
    """Fine-tune the NeZha sentiment classifier with interleaved rationale data.

    Each epoch performs three passes: 'inter' (rationale) data, 'train'
    (classification) data, then 'inter' again.  The model is checkpointed
    whenever full-validation accuracy matches or beats the best seen so far.

    Args:
        args: config object (see TrainConfig) with paths and hyper-parameters.
        test_num: cap on examples per training split; 0 means use everything —
            TODO confirm against loader4_baidu_senti.get_data.
        repeat: repeat factor passed to the data loader.
    """
    os.makedirs(args.out_dir, exist_ok=True)
    model_save_path = os.path.join(args.out_dir, 'model.pth')
    model = NeZhaForSequenceClassification(args)
    model = torch_tool.cuda(model)

    train_data = loader4_baidu_senti.get_data(args, 'train', repeat=repeat)
    # NOTE(review): each epoch below runs three passes (inter/train/inter), so
    # the scheduler advances roughly 3x more steps than total_steps accounts
    # for — confirm this is intended.
    total_steps = args.num_epochs * len(train_data)
    optimizer, scheduler = nezha_torch_tool.build_optimizer(args, model, total_steps)
    bad_num = 0
    # Baseline scores before fine-tuning; checkpoints must beat best_score.
    # (Removed the dead `best_score = 0` that was immediately overwritten.)
    best_score = get_valid_score(args, model, 'valid', test_num=50)
    inter_score = get_valid_score(args, model, 'inter', test_num=100)
    train_score = get_valid_score(args, model, 'train', test_num=50)
    logs.brief.info(f'start!    best_score={best_score:0.4f}    train_score={train_score:0.4f}   inter_score={inter_score:0.4f}')

    for epoch in range(1, args.num_epochs + 1):
        inter_data = loader4_baidu_senti.get_data(args, 'inter', test_num=test_num, repeat=repeat)
        tqdm_data = tqdm(inter_data, desc=f'Training epoch : {epoch}', total=len(inter_data))
        train_one_epoch(model, tqdm_data, optimizer, scheduler, epoch)

        train_data = loader4_baidu_senti.get_data(args, 'train', test_num=test_num, repeat=repeat)
        tqdm_data = tqdm(train_data, desc=f'Training epoch : {epoch}', total=len(train_data))
        train_one_epoch(model, tqdm_data, optimizer, scheduler, epoch)

        inter_data = loader4_baidu_senti.get_data(args, 'inter', test_num=test_num, repeat=repeat)
        tqdm_data = tqdm(inter_data, desc=f'Training epoch : {epoch}', total=len(inter_data))
        train_one_epoch(model, tqdm_data, optimizer, scheduler, epoch)

        inter_score = get_valid_score(args, model, 'inter', test_num=100)
        train_score = get_valid_score(args, model, 'train', test_num=50)
        valid_score = get_valid_score(args, model, 'valid')
        if valid_score >= best_score:
            best_score = valid_score
            logs.brief.info(f'save_model    best_score={best_score:0.4f}    train_score={train_score:0.4f}   inter_score={inter_score:0.4f}')
            torch_tool.save_model(model, model_save_path)
        else:
            # Fixed log typo: 'vliad' -> 'valid'.
            logs.brief.info(f'不保存模型    best_score={best_score:0.4f}(valid={valid_score:0.4f})    '
                            f'train_score={train_score:0.4f}   inter_score={inter_score:0.4f}')
            bad_num += 1
            # NOTE(review): bad_num never resets on improvement, and with
            # num_epochs=25 the >50 threshold can never fire — confirm intent.
            if bad_num > 50:
                logs.brief.info(f'已经有 {bad_num}次损失不再降低 停止预训练')
                break

    # Explicitly drop large objects and collect garbage so GPU/host memory is
    # released (avoids OOM on constrained environments such as kaggle).
    del model, optimizer, scheduler
    torch.cuda.empty_cache()
    gc.collect()


class TrainConfig:
    """Static configuration for the baidu-senti fine-tuning run.

    NOTE: this class has import-time side effects — it logs the chosen
    data_dir and seeds all RNGs when the class body executes.
    """
    # Pretrained NeZha checkpoint used to initialise the model.
    pre_model_dir = os.path.join(os.path.dirname(__file__), 'data/baidu_senti_20220508/record/checkpoint-164430')
    # Dataset root: Windows dev path first, Linux server path as fallback.
    data_dir = 'E:/code/data/LIC2022-百度比赛/百度-2022语言与智能技术竞赛：情感可解释评测'
    if not os.path.exists(data_dir):
        data_dir = '/home/wangxiaoyu/data/LIC2022-baidu/s情感可解释评测'
    logs.tmp.info(f'data_dir={data_dir}')
    # Output directory where model.pth checkpoints are written by train().
    out_dir = os.path.join(os.path.dirname(__file__), 'data/baidu_senti_20220514')

    # Optimisation hyper-parameters.
    num_epochs = 25
    batch_size = 10
    max_seq_len = 350
    learning_rate = 5e-5
    eps = 1e-8  # presumably the Adam epsilon — verify in build_optimizer
    warmup_ratio = 0.1  # fraction of total steps for LR warm-up — TODO confirm
    weight_decay = 0.01
    hidden_size = 768
    num_labels = 2

    logging_step = 300
    seed = 9527
    # Seeds python/numpy/torch RNGs at class-definition (import) time.
    torch_tool.seed_everything(seed)


if __name__ == '__main__':
    # Visual separator in the log between consecutive runs.
    logs.print('=' * 30)
    # Print dataset statistics before training starts.
    loader4_baidu_senti.show_data_num(TrainConfig)
    # train(TrainConfig, is_test=True)
    train(TrainConfig, test_num=0, repeat=1)
    # predict_cv(TrainConfig, is_test=True)
    # main_train_classify(shutdown=True, is_test=False)
    # predict_cv(TrainConfig, is_test=True)
