# coding:utf-8
import gc
import csv
import numpy as np
import warnings
import os
from tqdm import tqdm
import torch
from torch import multiprocessing

import loader
from nezha.modeling.nezha_classify import NeZhaForSequenceClassification
from nezha.util import nezha_torch_tool

# Share tensors between DataLoader workers via the file system rather than
# file descriptors (the default strategy can hit the open-file-descriptor limit).
multiprocessing.set_sharing_strategy('file_system')


def train(args, model_dir=''):
    """Fine-tune a NeZha sequence-classification model and save it to disk.

    Args:
        args: configuration object (e.g. ``TrainConfig``) providing
            ``pretrain_dir``, ``num_epochs``, ``logging_step``, ``out_dir``
            and the loader/optimizer settings.
        model_dir: directory to save the trained model into; defaults to
            ``args.out_dir`` when empty.
    """
    model_dir = model_dir or args.out_dir
    tokenizer = nezha_torch_tool.BertTokenizer.from_pretrained(args.pretrain_dir)
    model = nezha_torch_tool.cuda(NeZhaForSequenceClassification.from_pretrained(args.pretrain_dir))
    loader.save_data(args, tokenizer)

    train_dataloader, test_dataloader = loader.load_data(args, tokenizer)
    total_steps = args.num_epochs * len(train_dataloader)
    optimizer, scheduler = nezha_torch_tool.build_optimizer(args, model, total_steps)
    total_loss, cur_avg_loss, global_steps = 0., 0., 0
    for epoch in range(1, args.num_epochs + 1):
        train_iterator = tqdm(train_dataloader, desc=f'Training epoch : {epoch}', total=len(train_dataloader))
        model.train()
        for batch in train_iterator:
            batch_cuda = nezha_torch_tool.cuda(batch)
            loss, logits = model(**batch_cuda)[:2]
            loss.backward()
            # Clip gradients before the optimizer step to stabilize training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

            total_loss += loss.item()
            cur_avg_loss += loss.item()

            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

            global_steps += 1

            # Fix: cur_avg_loss was accumulated every step but never reported
            # or reset (and args.logging_step was never used) — report the
            # running average every `logging_step` steps, then reset it.
            if global_steps % args.logging_step == 0:
                avg = cur_avg_loss / args.logging_step
                tqdm.write(f'>> global step {global_steps}, avg training loss: {avg:.4f}')
                cur_avg_loss = 0.

            train_iterator.set_postfix_str(f'running training loss: {loss.item():.4f}')

    nezha_torch_tool.save_model(model, tokenizer, model_dir)

    del model, tokenizer, optimizer, scheduler
    torch.cuda.empty_cache()
    gc.collect()  # free memory of destroyed objects (avoids OOM in environments like Kaggle)


def predict_full(args):
    """Predict labels for the test set with the saved model and write a submission CSV.

    Args:
        args: configuration object providing ``out_dir`` (saved-model and
            output directory) and, optionally, ``model_path`` for the tokenizer.

    Side effects:
        Writes ``{args.out_dir}/submita.csv`` with columns ``id,label``.
    """
    print('\n>> loading test dataset ... ...')
    # Fix: TrainConfig defines no `model_path`, so `args.model_path` raised
    # AttributeError when called from main_train_classify(); fall back to
    # out_dir, where train() saved the tokenizer.
    tokenizer_dir = getattr(args, 'model_path', args.out_dir)
    tokenizer = nezha_torch_tool.BertTokenizer.from_pretrained(tokenizer_dir)
    train_dataloader, test_dataloader = loader.load_data(args, tokenizer)

    print('\n>> full data')

    print('\n>> start predicting ... ...')
    best_model = NeZhaForSequenceClassification.from_pretrained(args.out_dir)
    best_model = nezha_torch_tool.cuda(best_model)
    best_model.eval()

    val_iterator = tqdm(test_dataloader, desc='Predict test data',
                        total=len(test_dataloader))
    p_logit = []
    with torch.no_grad():  # inference only: no gradient tracking
        for batch in val_iterator:
            batch_cuda = nezha_torch_tool.cuda(batch)
            logits = best_model(**batch_cuda)[0]
            p_logit.extend(torch.softmax(logits, -1).cpu().numpy())

    res = np.vstack(p_logit)

    # Predicted class index per sample. (Removed the dead `final_res.tolist()`
    # no-op and the redundant `final_res = res` alias from the original.)
    predictions = np.argmax(res, axis=-1)

    # (row id, predicted label) pairs; enumerate replaces the manual counter.
    result = [(k, str(label)) for k, label in enumerate(predictions)]

    submit_path = f'{args.out_dir}/submita.csv'
    with open(submit_path, 'w', newline='') as f:
        tsv_w = csv.writer(f, delimiter=',')
        tsv_w.writerow(['id', 'label'])
        tsv_w.writerows(result)

    print('\n>> predict completed .')


class TrainConfig:
    """Hyper-parameters and filesystem paths for NeZha classification training."""

    out_dir = 'data/0114'        # output dir: fine-tuned model + submission CSV
    data_dir = 'data/shandong'   # dataset root
    pretrain_dir = 'E:/code/data/pretrain_model_file/哪吒继续预训练/checkpoint-312000'
    # Fix: predict_full() reads args.model_path, which was missing here and
    # would raise AttributeError; point it at the saved-model directory.
    model_path = out_dir

    train_path = f'{data_dir}/train.csv'
    test_path = f'{data_dir}/testa_nolabel.csv'

    num_epochs = 15
    batch_size = 10
    max_seq_len = 350            # maximum tokenized sequence length

    learning_rate = 5e-5
    eps = 1e-8                   # optimizer epsilon

    warmup_ratio = 0.1           # fraction of total steps used for LR warm-up
    weight_decay = 0.01

    logging_step = 300           # steps between average-loss reports

    seed = 9527


def main_train_classify(shutdown=False):
    """End-to-end entry point: fine-tune the model, then predict the test set.

    Args:
        shutdown: when True, power the machine off after finishing (handy on
            rented cloud servers — stops the billing clock).
    """
    warnings.filterwarnings('ignore')
    os.makedirs(TrainConfig.out_dir, exist_ok=True)
    nezha_torch_tool.seed_everything(TrainConfig.seed)

    cfg = TrainConfig
    train(cfg)
    predict_full(cfg)

    if shutdown:
        os.system("shutdown")


# Script entry point: run training followed by test-set prediction;
# shutdown=False keeps the machine running afterwards.
if __name__ == '__main__':
    main_train_classify(shutdown=False)
