# coding:utf-8
import gc
import csv
import numpy as np
import warnings
import os
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import StratifiedKFold
import torch
from torch import multiprocessing

import loader
from nezha.util import nezha_torch_tool
from nezha.modeling.nezha_classify import NeZhaForSequenceClassification

# Share DataLoader worker tensors via the file system — presumably to avoid
# "too many open files" errors from the default file-descriptor strategy
# when many workers are used; confirm against the runtime environment.
multiprocessing.set_sharing_strategy('file_system')


def train_one_fold(args, fold, train_index, dev_index):
    """Train one CV fold and keep the checkpoint with the best dev accuracy.

    Args:
        args: config object (model_path, out_dir, num_epochs, logging_step, ...).
        fold: fold index; used only to name the checkpoint directory.
        train_index, dev_index: row indices selecting this fold's train/dev split.

    Side effects: writes the best checkpoint to ``{out_dir}/cv/last-checkpoint-{fold}``
    and frees the model/optimizer/scheduler before returning.
    """
    model_save_path = os.path.join(args.out_dir, f'cv/last-checkpoint-{fold}')
    tokenizer = nezha_torch_tool.BertTokenizer.from_pretrained(args.model_path)
    model = nezha_torch_tool.cuda(NeZhaForSequenceClassification.from_pretrained(args.model_path))

    train_dataloader, val_dataloader = loader.load_cv_data(args, train_index, dev_index, tokenizer)
    total_steps = args.num_epochs * len(train_dataloader)
    optimizer, scheduler = nezha_torch_tool.build_optimizer(args, model, total_steps)
    # total_loss accumulates over the whole fold; cur_avg_loss accumulates over
    # the current logging window and is reset every args.logging_step steps.
    total_loss, cur_avg_loss, global_steps = 0., 0., 0
    best_acc_score = 0.

    for epoch in range(1, args.num_epochs + 1):
        train_iterator = tqdm(train_dataloader, desc=f'fold={fold} Training epoch : {epoch}', total=len(train_dataloader))
        model.train()

        for batch in train_iterator:
            batch_cuda = nezha_torch_tool.cuda(batch)
            loss, logits = model(**batch_cuda)[:2]
            loss.backward()
            # Clip gradients before the optimizer consumes them.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

            total_loss += loss.item()
            cur_avg_loss += loss.item()

            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

            train_iterator.set_postfix_str(f'running training loss: {loss.item():.4f}')
            if (global_steps + 1) % args.logging_step == 0:

                # NOTE(review): "epoch avg loss" is actually the average over the
                # last logging window, not over the epoch — consider renaming.
                epoch_avg_loss = cur_avg_loss / args.logging_step
                global_avg_loss = total_loss / (global_steps + 1)

                print(f"\n>> epoch={epoch},  global steps={global_steps + 1}, "
                      f"epoch avg loss={epoch_avg_loss:.4f}, global avg loss={global_avg_loss:.4f}.")

                # Mid-epoch dev evaluation; checkpoint only when accuracy improves.
                metric = nezha_torch_tool.evaluation(model, val_dataloader)
                acc, f1, avg_val_loss = metric['acc'], metric['f1'], metric['avg_val_loss']

                if acc > best_acc_score:
                    best_acc_score = acc
                    nezha_torch_tool.save_model(model, tokenizer, model_save_path)
                    print(f'\n>>>\n    best acc={best_acc_score}, dev loss={avg_val_loss} .')
                # Evaluation presumably switches to eval mode; restore training mode.
                model.train()
                cur_avg_loss = 0.0
            global_steps += 1

    del model, optimizer, scheduler
    torch.cuda.empty_cache()
    gc.collect()  # garbage-collect the freed objects to keep memory down (e.g. on Kaggle)


def train_all_fold(args):
    """Run stratified K-fold training: cache the tokenized data once, then
    train one model per fold via :func:`train_one_fold`.

    Args:
        args: config object (num_folds, seed, train_path, model_path, ...).
    """
    # random_state makes the fold assignment reproducible across runs;
    # shuffle=True alone would depend on the ambient numpy global RNG state.
    skf = StratifiedKFold(shuffle=True, n_splits=args.num_folds, random_state=args.seed)
    tokenizer = nezha_torch_tool.BertTokenizer.from_pretrained(args.model_path)
    loader.save_data(args, tokenizer)
    train = pd.read_csv(args.train_path, sep=',')
    # NOTE(review): stratification label is assumed to live in column 3 of the
    # training CSV — confirm against the file schema.
    y = train.iloc[:, 3]

    # X only supplies the sample count; the split is stratified on y.
    for fold, (train_index, dev_index) in enumerate(skf.split(X=np.arange(train.shape[0]), y=y)):
        train_one_fold(args, fold, train_index, dev_index)


def predict_cv(args):
    """Ensemble-predict the test set and write a submission CSV.

    For each fold checkpoint, computes softmax class probabilities over the
    test set; probabilities are summed across folds and the argmax class is
    written as ``{out_dir}/submita.csv`` with columns ``id,label``.

    Args:
        args: config object (model_path, out_dir, num_folds, ...).
    """
    print('\n>> loading test dataset ... ...')
    tokenizer = nezha_torch_tool.BertTokenizer.from_pretrained(args.model_path)
    train_dataloader, test_dataloader = loader.load_data(args, tokenizer)
    print('\n>> cv data')
    print('\n>> start predicting ... ...')

    final_res = None  # running sum of per-fold probability matrices
    for fold in range(args.num_folds):
        best_model_path = os.path.join(args.out_dir, f'cv/last-checkpoint-{fold}')
        best_model = NeZhaForSequenceClassification.from_pretrained(best_model_path)
        best_model = nezha_torch_tool.cuda(best_model)
        best_model.eval()

        test_iterator = tqdm(test_dataloader, desc='Predict test data', total=len(test_dataloader))

        all_prob = []
        with torch.no_grad():
            for batch in test_iterator:
                batch_cuda = nezha_torch_tool.cuda(batch)
                logits = best_model(**batch_cuda)[0]
                all_prob.extend(logits.softmax(-1).cpu().numpy())

        res = np.vstack(all_prob)
        final_res = res if final_res is None else final_res + res

        # Release this fold's model before loading the next checkpoint,
        # matching the cleanup done in train_one_fold.
        del best_model
        torch.cuda.empty_cache()

    print('\n>> combining ... ...')
    # Summed probabilities have the same argmax as averaged ones.
    predictions = np.argmax(final_res, axis=-1)
    result = [(idx, str(label)) for idx, label in enumerate(predictions)]

    submit_path = f'{args.out_dir}/submita.csv'
    with open(submit_path, 'w', newline='') as f:
        tsv_w = csv.writer(f, delimiter=',')
        tsv_w.writerow(['id', 'label'])
        tsv_w.writerows(result)
    print('\n>> predict completed .')


class TrainConfig:
    """Static hyperparameter / path configuration passed around as ``args``."""

    # --- paths ---
    out_dir = 'data/0114'          # checkpoints and submission output
    data_dir = 'data/shandong'
    new_pretrain_dir = 'E:/code/data/pretrain_model_file/哪吒继续预训练/checkpoint-312000'
    train_path = f'{data_dir}/train.csv'
    test_path = f'{data_dir}/testa_nolabel.csv'

    # continued-pretraining checkpoint used as the starting model
    model_path = new_pretrain_dir

    # --- training schedule ---
    num_epochs = 15
    batch_size = 10
    max_seq_len = 350

    # --- optimizer ---
    learning_rate = 5e-5
    eps = 1e-8

    warmup_ratio = 0.1
    weight_decay = 0.01

    # evaluate on dev + maybe checkpoint every N steps
    logging_step = 300

    seed = 9527

    num_folds = 5


def main_train_classify(shutdown=False):
    """End-to-end entry point: prepare output dirs and seeds, train every
    CV fold, then write the ensemble test predictions.

    Args:
        shutdown: when True, power off the machine once the run finishes
            (handy on pay-per-hour cloud instances).
    """
    warnings.filterwarnings('ignore')
    nezha_torch_tool.make_dirs([TrainConfig.out_dir])
    nezha_torch_tool.seed_everything(TrainConfig.seed)

    train_all_fold(TrainConfig)
    predict_cv(TrainConfig)

    if not shutdown:
        return
    os.system("shutdown")  # auto power-off after training on a cloud server to save cost


# Script entry point: run the full train + predict pipeline without shutdown.
if __name__ == '__main__':
    main_train_classify(shutdown=False)
