# coding:utf-8
import gc
import csv
import numpy as np
import warnings
import os
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import StratifiedKFold
import torch
from torch import multiprocessing

from loader import loader2_hierarchy
from nezha.util import nezha_torch_tool
from nezha.modeling.nezha_hierarchy_classify import NeZhaHierarchyClassification

# Use file-backed shared memory for tensors exchanged between DataLoader worker
# processes; the default ('file_descriptor') can exhaust the open-FD limit.
multiprocessing.set_sharing_strategy('file_system')


def evaluation(model, val_dataloader, is_test=False):
    """Evaluate `model` on `val_dataloader` and return the accuracy.

    Note: puts the model into eval mode and leaves it there; the caller is
    responsible for switching back to train mode afterwards.

    Args:
        model: classifier whose forward pass returns per-class scores/probs.
        val_dataloader: yields dict batches that include a 'labels' tensor.
        is_test: when True, evaluate a single batch only (smoke-test mode).

    Returns:
        float: accuracy of argmax predictions against the gold labels.
    """
    model.eval()
    preds, labels = [], []
    val_iterator = tqdm(val_dataloader, desc='Evaluation', total=len(val_dataloader))
    with torch.no_grad():
        for batch in val_iterator:
            batch_cuda = nezha_torch_tool.cuda(batch)
            prob = model(**batch_cuda)
            # .tolist() already yields plain Python scalars; the original
            # wrapped it in a no-op list comprehension.
            preds.extend(torch.argmax(prob, 1).cpu().numpy().tolist())
            labels.extend(batch_cuda['labels'].cpu().numpy().tolist())
            if is_test:
                break
    acc = nezha_torch_tool.accuracy_score(y_true=labels, y_pred=preds)
    return acc


def train_one_fold(args, fold, train_index, dev_index, is_test=False):
    """Fine-tune one CV fold, checkpointing whenever validation accuracy improves.

    Args:
        args: config object (model_path, out_dir, num_epochs, logging_step, ...).
        fold: fold index, embedded in the checkpoint directory name.
        train_index, dev_index: row indices selecting this fold's train/dev split.
        is_test: when True, run one step plus one eval and exit (smoke-test mode).
    """
    model_save_path = os.path.join(args.out_dir, f'cv/last-checkpoint-{fold}')
    tokenizer = nezha_torch_tool.BertTokenizer.from_pretrained(args.model_path)
    model = nezha_torch_tool.cuda(NeZhaHierarchyClassification.from_pretrained(args.model_path))
    # Save immediately so a checkpoint exists even if no eval ever beats best_acc_score.
    nezha_torch_tool.save_model(model, tokenizer, model_save_path)

    train_dataloader, val_dataloader = loader2_hierarchy.load_cv_data(args, train_index, dev_index, tokenizer)
    total_steps = args.num_epochs * len(train_dataloader)
    optimizer, scheduler = nezha_torch_tool.build_optimizer(args, model, total_steps)
    total_loss, cur_avg_loss, global_steps = 0., 0., 0
    steps_since_log = 0  # steps folded into cur_avg_loss since the last logging point
    best_acc_score = 0.

    for epoch in range(1, args.num_epochs + 1):
        train_iterator = tqdm(train_dataloader, desc=f'fold={fold} Training epoch : {epoch}', total=len(train_dataloader))
        model.train()

        for batch in train_iterator:
            batch_cuda = nezha_torch_tool.cuda(batch)
            loss = model(**batch_cuda)  # model returns the loss when labels are supplied
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

            # Hoist the GPU->CPU sync: the original called loss.item() three times.
            loss_val = loss.item()
            total_loss += loss_val
            cur_avg_loss += loss_val
            steps_since_log += 1

            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

            train_iterator.set_postfix_str(f'running training loss: {loss_val:.4f}')
            if (global_steps + 1) % args.logging_step == 0 or is_test:
                # Fix: average over the steps actually accumulated since the last
                # log. The original divided by args.logging_step, which is wrong
                # whenever the log fires early (e.g. is_test mode after one step).
                epoch_avg_loss = cur_avg_loss / steps_since_log
                global_avg_loss = total_loss / (global_steps + 1)

                acc = evaluation(model, val_dataloader, is_test=is_test)
                print(f"\n>> epoch={epoch},  global steps={global_steps + 1},  acc={acc:0.4f}  "
                      f" avg_loss={epoch_avg_loss:.4f}, global_avg_loss={global_avg_loss:.4f}.")

                if acc > best_acc_score:
                    best_acc_score = acc
                    nezha_torch_tool.save_model(model, tokenizer, model_save_path)
                    print(f'\n>>>\n    best acc={best_acc_score}')
                model.train()  # evaluation() switched the model to eval mode
                cur_avg_loss = 0.0
                steps_since_log = 0
            global_steps += 1
            if is_test:
                break
        if is_test:
            break
    del model, optimizer, scheduler
    torch.cuda.empty_cache()
    gc.collect()  # reclaim memory of destroyed objects (avoids OOM on Kaggle-like environments)


def train_all_fold(args, is_test=False):
    """Run stratified K-fold training over the first ``args.use_fold`` folds.

    Args:
        args: config object (num_folds, use_fold, train_path, model_path, ...).
        is_test: forwarded to each fold for smoke-test mode.
    """
    # NOTE(review): shuffle=True without an explicit random_state relies on the
    # global numpy seed set elsewhere for reproducible splits — confirm.
    skf = StratifiedKFold(shuffle=True, n_splits=args.num_folds)
    tokenizer = nezha_torch_tool.BertTokenizer.from_pretrained(args.model_path)
    loader2_hierarchy.save_data(args, tokenizer)
    frame = pd.read_csv(args.train_path, sep=',')
    stratify_labels = frame.iloc[:, 3]  # 4th column is the stratification target

    fold_splits = skf.split(X=range(frame.shape[0]), y=stratify_labels)
    for fold, (train_index, dev_index) in enumerate(fold_splits):
        if fold >= args.use_fold:
            break
        train_one_fold(args, fold, train_index, dev_index, is_test=is_test)


def predict_cv(args, is_test=False):
    """Ensemble the per-fold checkpoints on the test set and write a submission CSV.

    Probabilities from each fold's best checkpoint are summed across folds, then
    argmax'ed into final labels. Writes `{args.out_dir}/submitb.csv` with
    columns (id, label).

    Args:
        args: config object (out_dir, model_path, num_folds, use_fold, ...).
        is_test: when True, predict a single batch per fold (smoke-test mode).

    Raises:
        ValueError: if no fold produced predictions (use_fold/num_folds <= 0).
    """
    print('\n>> loading test dataset ... ...')
    tokenizer = nezha_torch_tool.BertTokenizer.from_pretrained(args.model_path)
    # The train loader is not needed for prediction.
    _, test_dataloader = loader2_hierarchy.load_data(args, tokenizer)
    print('\n>> cv data')
    print('\n>> start predicting ... ...')

    final_res = None
    # Fold loop capped by use_fold (the original used break inside range(num_folds)).
    for fold in range(min(args.num_folds, args.use_fold)):
        best_model_path = os.path.join(args.out_dir, f'cv/last-checkpoint-{fold}')
        best_model = NeZhaHierarchyClassification.from_pretrained(best_model_path)
        best_model = nezha_torch_tool.cuda(best_model)
        best_model.eval()

        test_iterator = tqdm(test_dataloader, desc=f'Predict test data fold={fold}', total=len(test_dataloader))

        all_prob = []
        with torch.no_grad():
            for batch in test_iterator:
                batch_cuda = nezha_torch_tool.cuda(batch)
                prob = best_model(**batch_cuda)
                all_prob.extend(prob.cpu().numpy())
                if is_test:
                    break

        res = np.vstack(all_prob)
        # Sum raw per-class scores across folds (simple additive ensemble).
        final_res = res if final_res is None else final_res + res

    # Fix: the original crashed in np.argmax(None) if no fold was predicted.
    if final_res is None:
        raise ValueError('no folds were predicted: check args.use_fold / args.num_folds')

    print('\n>> combining ... ...')
    predictions = np.argmax(final_res, axis=-1)
    # Row position doubles as the submission id (enumerate replaces the manual counter).
    result = [(row_id, str(label)) for row_id, label in enumerate(predictions)]

    submit_path = f'{args.out_dir}/submitb.csv'
    with open(submit_path, 'w', newline='') as f:
        tsv_w = csv.writer(f, delimiter=',')
        tsv_w.writerow(['id', 'label'])
        tsv_w.writerows(result)
    print('\n>> predict completed .')


class TrainConfig:
    """Static configuration for CV training and prediction."""
    out_dir = 'data/0118_checkpoint_309999'      # where checkpoints and the submission CSV go
    data_dir = 'data/shandong'
    # Continued-pretraining checkpoint; falls back to local paths when the
    # Windows dev-machine path does not exist.
    new_pretrain_dir = 'E:/code/data/pretrain_model_file/哪吒继续预训练/checkpoint-312000'
    if not os.path.exists(new_pretrain_dir):
        new_pretrain_dir = 'data/new_pretrain_dir/0113/checkpoint-312000'
        new_pretrain_dir = 'data/new_pretrain_dir/0113/record/checkpoint-309999'
    train_path = f'{data_dir}/train.csv'
    test_path = f'{data_dir}/testb_nolabel.csv'

    model_path = new_pretrain_dir

    num_epochs = 15
    batch_size = 10
    max_seq_len = 350

    learning_rate = 5e-5
    eps = 1e-8          # optimizer epsilon (presumably AdamW — confirm in build_optimizer)

    warmup_ratio = 0.1
    weight_decay = 0.01

    logging_step = 300  # evaluate + maybe checkpoint every N training steps

    seed = 9527

    num_folds = 12      # folds created by StratifiedKFold
    use_fold = 5        # only the first N folds are actually trained/ensembled

    high_merge_labels = [  # Labels within a group are indistinguishable even to humans.
                           # Groups must not share any label, otherwise summing
                           # probabilities across groups would be inconsistent.
        [1, 3, 4, 6, 12, 22],
        [5, 10, 16],
        [8, 17],
        [9, 13, 14],
    ]


def main_train_classify(shutdown=False, is_test=False):
    """End-to-end pipeline: train every CV fold, then ensemble-predict the test set.

    Args:
        shutdown: power off the machine when done (useful on rented cloud boxes).
        is_test: smoke-test mode, forwarded to training and prediction.
    """
    warnings.filterwarnings('ignore')
    nezha_torch_tool.make_dirs([TrainConfig.out_dir])
    nezha_torch_tool.seed_everything(TrainConfig.seed)

    train_all_fold(TrainConfig, is_test=is_test)
    predict_cv(TrainConfig, is_test=is_test)

    if shutdown:
        # Cloud server: auto power-off after training to save money.
        os.system("shutdown")


if __name__ == '__main__':
    # Alternate entry points kept around for experiments:
    # main_train_classify(shutdown=False, is_test=True)   # smoke-test the full pipeline
    # predict_cv(TrainConfig, is_test=True)
    # main_train_classify(shutdown=True, is_test=False)   # full run, power off afterwards
    predict_cv(TrainConfig, is_test=True)
