from models.dnn import DNN
from datasets.tf_idf_data import TF_IDF_data
import torch
import torch.nn as nn
from models.radam import RAdam
from torch.utils.data import DataLoader
from sklearn.model_selection import StratifiedKFold
import numpy as np
from trainer import Trainer
import inferner
import time

if __name__ == '__main__':

    # 5-fold training of a TF-IDF DNN classifier, followed by ensemble
    # inference over all fold checkpoints.
    BATCH_SIZE = 128
    epoch = 200
    train_data_csv = r'data/track1_round1_train_20210222_all.csv'
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Fit IDF statistics once on the full training set; every fold reuses
    # the same idf_dict so train/val/test features share one vocabulary.
    all_data = TF_IDF_data(train_data_csv, is_train=True)
    labels = np.array(all_data.label_list)
    folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2021).split(
        np.arange(all_data.tf_idf_list.shape[0]), labels)

    loss_list = []  # best validation loss per fold
    auc_list = []   # best validation AUC per fold
    chkp_list = []  # checkpoint path per fold (consumed by ensemble inference)

    for fold, (trn_idx, val_idx) in enumerate(folds):
        print(f'------------------Fold {fold}--------------------')
        train_data = TF_IDF_data(train_data_csv, idf_dict=all_data.idf_dict, is_train=True, idx=trn_idx)
        val_data = TF_IDF_data(train_data_csv, idf_dict=all_data.idf_dict, is_train=True, idx=val_idx)
        train_data_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
        val_data_loader = DataLoader(val_data, batch_size=BATCH_SIZE, shuffle=False)

        # Model sizes come from the first sample: item[1] looks like the
        # feature vector and item[2] the label vector — confirm against
        # TF_IDF_data.__getitem__.
        sample = train_data[0]
        model = DNN(len(sample[1]), len(sample[2])).to(device)
        optimizer = RAdam(model.parameters(), lr=0.001)
        criterion = nn.BCEWithLogitsLoss()
        trainer = Trainer(model, optimizer, criterion, train_data_loader, val_data_loader, epoch=epoch)
        # Timestamp in the name keeps checkpoints from different runs distinct.
        save_name = f'save/tf_idf_f{fold}_{time.strftime("%Y%m%d%H%M%S", time.localtime())}.pth'

        min_val_loss, max_val_auc = trainer.train(save_name)
        print(f'Fold{fold}', min_val_loss)
        print(f'Fold{fold}', max_val_auc)

        loss_list.append(min_val_loss)
        auc_list.append(max_val_auc)
        chkp_list.append(save_name)

    print('Loss list:', loss_list)
    print('AUC list:', auc_list)

    # Ensemble inference over all fold checkpoints on the test split.
    test_data = TF_IDF_data(r'data/track1_round1_testA_20210222.csv', idf_dict=all_data.idf_dict, is_train=False)
    test_data_loader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=False)  # load test data via DataLoader
    # NOTE(review): sizes deliberately reuse `sample` leaked from the last
    # fold iteration; feature/label dimensions are identical across folds.
    model = DNN(len(sample[1]), len(sample[2])).to(device)
    inferner.infer(model, chkp_list, f'result/tf_idf_{time.strftime("%Y%m%d%H%M%S", time.localtime())}.csv',
                   test_data_loader, device)
    print('Done')
