from models.textCNN import CNN_Text
import torch
import torch.nn as nn
from models.radam import RAdam
from models.focal_loss import FocalLoss
from datasets.words_data import Word_data
from torch.utils.data import DataLoader
from sklearn.model_selection import StratifiedKFold
import numpy as np
from tqdm import tqdm
from trainer import Trainer
import inferner
import time
import random

if __name__ == '__main__':

    # --- Hyperparameters / paths -------------------------------------------
    BATCH_SIZE = 256
    embed_num = 859        # vocabulary size fed to the embedding layer
    epoch = 40
    max_length = 80        # maximum token-sequence length per sample
    train_data_csv = r'data/track1_round1_train_20210222_all.csv'
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load the full training set once just to obtain labels for stratification.
    all_data = Word_data(train_data_csv, is_train=True, max_length=max_length)
    labels = all_data.label_list

    # Per-fold checkpoints from a previous run, used to warm-start each fold.
    pretrain_weights_path_list = [r'save/textCNN_f0_20210330223007.pth',
                                  r'save/textCNN_f1_20210330223807.pth',
                                  r'save/textCNN_f2_20210330224929.pth',
                                  r'save/textCNN_f3_20210330225921.pth',
                                  r'save/textCNN_f4_20210330231119.pth']

    # 5-fold stratified split; fixed seed keeps folds aligned with the
    # pretrained per-fold checkpoints above.
    folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2021).split(
        np.arange(all_data.data_list.shape[0]), labels)

    loss_list = []     # best (minimum) validation loss per fold
    max_auc_list = []  # best (maximum) validation AUC per fold
    chkp_list = []     # checkpoint path saved for each fold
    for fold, (trn_idx, val_idx) in enumerate(folds):
        print('------------------Fold %i--------------------' % fold)
        train_data = Word_data(train_data_csv, is_train=True, idx=trn_idx, max_length=max_length)
        val_data = Word_data(train_data_csv, is_train=True, idx=val_idx, max_length=max_length)
        train_data_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
        val_data_loader = DataLoader(val_data, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)

        # Warm-start this fold's model from its matching pretrained checkpoint.
        model = CNN_Text(embed_num)
        pre_weight = torch.load(pretrain_weights_path_list[fold])
        model.load_state_dict(pre_weight)

        # Freeze the embedding and convolutional layers so only the classifier
        # head (fc1) is fine-tuned.
        for layer in (model.embed, model.convs):
            for param in layer.parameters():
                param.requires_grad = False
        model.to(device)

        # Hand the optimizer only the trainable parameters. (Frozen parameters
        # receive no gradients anyway, but filtering makes the intent explicit
        # and replaces a previously dead `params` collection.)
        trainable_params = [p for p in model.parameters() if p.requires_grad]
        optimizer = RAdam(trainable_params, lr=0.0001)

        # Multi-label objective: BCE-with-logits weighted by per-class weights
        # computed on the training split.
        criterion = nn.BCEWithLogitsLoss(weight=torch.from_numpy(train_data.class_weights).to(device))

        trainer = Trainer(model, optimizer, criterion, train_data_loader, val_data_loader, epoch=epoch)
        save_name = 'save/textCNN_f%i_%s.pth' % (fold, time.strftime("%Y%m%d%H%M%S", time.localtime()))
        min_val_loss, max_val_auc = trainer.train(save_name)
        print('Fold' + str(fold), min_val_loss)
        print('Fold' + str(fold), max_val_auc)
        loss_list.append(min_val_loss)
        max_auc_list.append(max_val_auc)
        chkp_list.append(save_name)

    print(max_auc_list)
    print(loss_list)

    # Ensemble inference on the test split over all fold checkpoints.
    model = CNN_Text(embed_num).to(device)
    test_data = Word_data(r'data/track1_round1_testA_20210222.csv', is_train=False, max_length=max_length)
    test_data_loader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)  # load test data via DataLoader

    inferner.infer(model, chkp_list, 'result/textCNN_%s.csv' % time.strftime("%Y%m%d%H%M%S", time.localtime()),
                   test_data_loader, device, is_round=False)