from models.lstm import LSTM
import torch
import torch.nn as nn
from models.radam import RAdam
from models.focal_loss import FocalLoss
from datasets.words_data import Word_data
from torch.utils.data import DataLoader
from sklearn.model_selection import StratifiedKFold
import numpy as np
from tqdm import tqdm
from trainer import Trainer
import inferner
import time
if __name__ == '__main__':

    # ---- configuration -----------------------------------------------------
    BATCH_SIZE = 64          # train/val batch size
    EPOCHS = 60              # training epochs per fold
    N_FOLDS = 5              # stratified CV folds
    NUM_CLASSES = 17         # output classes of the LSTM head
    LR = 0.001               # RAdam learning rate
    train_data_csv = r'data/track1_round1_train_20210222_all.csv'
    test_data_csv = r'data/track1_round1_testA_20210222.csv'

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load the full training set once, solely to obtain the label list and
    # sample count needed to build the stratified fold indices.
    all_data = Word_data(train_data_csv, is_train=True)
    labels = all_data.label_list

    # Stratified K-fold keeps the label distribution balanced across folds.
    # NOTE(review): no random_state is set, so fold membership changes on
    # every run (an earlier revision used random_state=2021) — confirm the
    # non-reproducible split is intentional.
    folds = StratifiedKFold(n_splits=N_FOLDS, shuffle=True).split(
        np.arange(all_data.data_list.shape[0]), labels)

    loss_list = []      # minimum validation loss achieved in each fold
    max_auc_list = []   # maximum validation AUC achieved in each fold
    chkp_list = []      # checkpoint path per fold, reused at inference time
    cur_time = time.strftime("%Y%m%d%H%M%S", time.localtime())  # run id used in output file names

    for fold, (trn_idx, val_idx) in enumerate(folds):
        print('------------------Fold %i--------------------' % fold)
        train_data = Word_data(train_data_csv, is_train=True, idx=trn_idx)
        val_data = Word_data(train_data_csv, is_train=True, idx=val_idx)
        train_data_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
        val_data_loader = DataLoader(val_data, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)

        # Fresh model and optimizer for every fold. BCEWithLogitsLoss is
        # applied to raw logits, i.e. the 17 outputs are treated as
        # independent binary targets (multi-label setup).
        model = LSTM(num_class=NUM_CLASSES, need_embedding=True).to(device)
        optimizer = RAdam(model.parameters(), lr=LR)
        criterion = nn.BCEWithLogitsLoss()

        trainer = Trainer(model, optimizer, criterion, train_data_loader, val_data_loader, epoch=EPOCHS)
        save_name = 'save/lstm_embedding_f%i_%s.pth' % (fold, cur_time)
        min_val_loss, max_val_auc = trainer.train(save_name)
        print('Fold' + str(fold), min_val_loss)
        print('Fold' + str(fold), max_val_auc)
        loss_list.append(min_val_loss)
        max_auc_list.append(max_val_auc)
        chkp_list.append(save_name)

    print(max_auc_list)

    # Inference on the test split using all per-fold checkpoints
    # (presumably inferner.infer ensembles them — see that module).
    test_data = Word_data(test_data_csv, is_train=False)
    test_data_loader = DataLoader(test_data, batch_size=32, shuffle=False, num_workers=2)  # load test data with DataLoader
    model = LSTM(num_class=NUM_CLASSES, need_embedding=True).to(device)
    inferner.infer(model, chkp_list, 'result/lstm_embedding_%s.csv' % cur_time, test_data_loader, device)
