# coding:utf-8
import gc
import os
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import StratifiedKFold
import torch
from torch import multiprocessing

from loader import loader2_hierarchy
from nezha.util import nezha_torch_tool
from nezha.modeling.nezha_hierarchy_classify import NeZhaHierarchyClassification

# Share tensors between dataloader workers via the filesystem; this strategy is
# typically chosen to avoid exhausting file descriptors with many workers
# (NOTE(review): assumption — confirm this was the original motivation).
multiprocessing.set_sharing_strategy('file_system')


def evaluation(model, val_dataloader, is_test=False):
    """Run ``model`` over ``val_dataloader`` and return classification accuracy.

    Args:
        model: torch module whose forward pass returns per-class scores
            of shape (batch, num_classes).
        val_dataloader: iterable of batches consumable as ``model(**batch)``;
            each batch must carry a ``'labels'`` tensor.
        is_test: when True, stop after the first batch (quick smoke-test mode).

    Returns:
        Accuracy as computed by ``nezha_torch_tool.accuracy_score``.
    """
    model.eval()
    preds, labels = [], []
    val_iterator = tqdm(val_dataloader, desc='Evaluation', total=len(val_dataloader))
    with torch.no_grad():
        for batch in val_iterator:
            batch_cuda = nezha_torch_tool.cuda(batch)
            prob = model(**batch_cuda)
            # .tolist() already yields a plain Python list; the original wrapped
            # it in an identity comprehension that also shadowed the loop index.
            preds.extend(torch.argmax(prob, 1).cpu().numpy().tolist())
            labels.extend(batch_cuda['labels'].cpu().numpy().tolist())
            if is_test:
                break
    acc = nezha_torch_tool.accuracy_score(y_true=labels, y_pred=preds)
    return acc


class TrainConfig:
    """Static path / hyper-parameter configuration shared by the threshold sweep."""
    out_dir = 'data/0117'          # where fold checkpoints live (see cv/last-checkpoint-0)
    data_dir = 'data/shandong'     # dataset root
    # Continued-pretraining NeZha checkpoint; fall back to the local copy when
    # the Windows development path does not exist on this machine.
    new_pretrain_dir = 'E:/code/data/pretrain_model_file/哪吒继续预训练/checkpoint-312000'
    if not os.path.exists(new_pretrain_dir):
        new_pretrain_dir = 'data/new_pretrain_dir/0113/checkpoint-312000'
    train_path = f'{data_dir}/train.csv'
    test_path = f'{data_dir}/testa_nolabel.csv'

    model_path = new_pretrain_dir  # tokenizer / base model loaded from here

    num_epochs = 15
    batch_size = 10
    max_seq_len = 350

    learning_rate = 5e-5
    eps = 1e-8                     # Adam epsilon

    warmup_ratio = 0.1
    weight_decay = 0.01

    logging_step = 300

    seed = 9527

    num_folds = 12
    use_fold = 5

    high_merge_labels = [  # groups of labels a human cannot tell apart; groups must not share a label, otherwise stacking the probabilities later would break
        [1, 3, 4, 6, 12, 22],
        [5, 10, 16],
        [8, 17],
        [9, 13, 14],
    ]


def test_one_threshold(args, threshold_prob, add_prob, data, is_test=False):
    """Load the fold-0 checkpoint, set one (threshold_prob, add_prob) pair on it
    and evaluate on ``data``.

    Args:
        args: config object providing ``out_dir`` (checkpoint parent directory).
        threshold_prob: probability threshold assigned to the model before eval.
        add_prob: probability boost assigned to the model before eval.
        data: dataloader forwarded straight to ``evaluation``.
        is_test: forwarded to ``evaluation`` (stop after one batch).

    Returns:
        Tuple ``(threshold_prob, add_prob, acc)``.
    """
    model_save_path = os.path.join(args.out_dir, 'cv/last-checkpoint-0')
    # NOTE: the original also instantiated a BertTokenizer here, but it was
    # never used — dropped to avoid a pointless disk load per grid point.
    model = nezha_torch_tool.cuda(NeZhaHierarchyClassification.from_pretrained(model_save_path))

    model.threshold_prob = threshold_prob
    model.add_prob = add_prob
    model.eval()
    acc = evaluation(model, data, is_test=is_test)
    print(f'threshold_prob={threshold_prob:0.2f}   add_prob={add_prob:0.2f}   acc={acc:0.12f}')
    del model
    torch.cuda.empty_cache()
    gc.collect()  # force collection so freed objects release memory (avoids OOM on Kaggle-like hosts)
    return threshold_prob, add_prob, acc


def main_test_threshold(is_test=False):
    """Grid-search (threshold_prob, add_prob) on the first CV fold's dev split.

    Builds one stratified train/dev split, evaluates every grid point with
    ``test_one_threshold`` and prints the results sorted by accuracy (best first).

    Args:
        is_test: when True each evaluation stops after one batch (smoke run).
    """
    args = TrainConfig
    # random_state makes the split reproducible — shuffle=True alone is not,
    # and the config already defines a seed for exactly this purpose.
    skf = StratifiedKFold(shuffle=True, n_splits=4, random_state=args.seed)
    tokenizer = nezha_torch_tool.BertTokenizer.from_pretrained(args.model_path)
    loader2_hierarchy.save_data(args, tokenizer)
    train = pd.read_csv(args.train_path, sep=',')
    y = train.iloc[:, 3]  # 4th column is used for stratification
    # Only the first fold's dev split is needed for the sweep; take it directly
    # instead of looping and breaking. The train dataloader is unused here.
    train_index, dev_index = next(skf.split(X=range(train.shape[0]), y=y))
    _, val_dataloader = loader2_hierarchy.load_cv_data(args, train_index, dev_index, tokenizer)

    infos = []
    for i in range(1):
        threshold_prob = 0.5 + 0.2 * i
        for j in range(2):
            add_prob = 1.1 * j
            info = test_one_threshold(args, threshold_prob, add_prob, val_dataloader, is_test=is_test)
            infos.append(info)
    infos.sort(key=lambda x: -x[2])  # highest accuracy first
    print('============================== 排序后 =================================')
    for threshold_prob, add_prob, acc in infos:
        print(f'threshold_prob={threshold_prob:0.2f}   add_prob={add_prob:0.2f}   acc={acc:0.12f}')


if __name__ == '__main__':
    # main_test_threshold(is_test=True)  # smoke run: one batch per evaluation
    main_test_threshold(is_test=False)
