from models.lstm import LSTM
import torch
import torch.nn as nn
from models.radam import RAdam
from models.focal_loss import FocalLoss
from datasets.words_data import Word_data
from torch.utils.data import DataLoader
from sklearn import metrics

from tqdm import tqdm
# Default mini-batch size used by the data loaders.
BATCH_SIZE = 128

class Trainer():
    """Training/validation loop driver for a multi-label classifier.

    The model is trained with ``optimizer``/``criterion`` on
    ``train_data_loader`` and evaluated each epoch on ``val_data_loader``;
    the weights with the lowest validation log-loss are checkpointed.

    If ``criterion`` is None the model is assumed to compute its own loss,
    i.e. ``model(inputs, labels)`` returns ``(loss, features)``.
    """

    def __init__(self, model, optimizer, criterion, train_data_loader,
                 val_data_loader, epoch=100, lr_scheduler=None):
        # Prefer the first GPU when available; batches are moved per-step.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.train_data_loader = train_data_loader
        self.val_data_loader = val_data_loader
        self.model = model
        self.epoch = epoch  # total number of epochs to run
        self.optimizer = optimizer
        self.criterion = criterion  # None => model returns its own loss
        self.lr_scheduler = lr_scheduler  # optional, stepped per batch

    def _run_train_epoch(self, epoch_idx, iters):
        """Run one pass over the training set.

        :param epoch_idx: 1-based index of the current epoch.
        :param iters: number of batches per epoch (for fractional scheduling).
        :return: sum of per-batch loss values for the epoch.
        """
        self.model.train()
        train_loss = 0.0
        for i, (report_ids, words_list, labels) in enumerate(tqdm(self.train_data_loader)):
            inputs = words_list.to(self.device)
            labels = labels.to(self.device)
            self.optimizer.zero_grad()
            if self.criterion is not None:
                output = self.model(inputs)
                loss = self.criterion(output, labels.float())
            else:
                # Model computes the loss itself when handed the labels.
                loss, text_fea = self.model(inputs, labels)
            loss.backward()
            self.optimizer.step()
            if self.lr_scheduler is not None:
                # Fractional-epoch step (CosineAnnealingWarmRestarts-style API).
                # BUGFIX: the original passed self.epoch (the constant TOTAL
                # epoch count) instead of the current epoch, freezing the
                # schedule; use the 0-based current epoch plus batch fraction.
                self.lr_scheduler.step(epoch_idx - 1 + (i + 1) / iters)
            train_loss += loss.item()
        return train_loss

    def _run_validation(self):
        """Run one pass over the validation set.

        :return: tuple ``(val_loss, predictions, labels)`` where predictions
            are per-sample sigmoid scores and labels the ground-truth lists.
        """
        self.model.eval()
        val_loss = 0.0
        pres_list = []
        labels_list = []
        for report_ids, word_vec_arrays, labels in tqdm(self.val_data_loader):
            inputs = word_vec_arrays.to(self.device)
            labels = labels.to(self.device)
            with torch.no_grad():
                output = self.model(inputs)
                if self.criterion is not None:
                    loss = self.criterion(output, labels.float())
                else:
                    # NOTE(review): kept as in the original, but this branch
                    # looks broken — the model is called WITHOUT labels above,
                    # yet the result is unpacked as (loss, features) here and
                    # .sigmoid() is then taken on the tuple below. Confirm
                    # against the model's forward() before relying on it.
                    loss, text_fea = output
                pres_list += output.sigmoid().detach().cpu().numpy().tolist()
                labels_list += labels.detach().cpu().numpy().tolist()
            val_loss += loss.item()
        return val_loss, pres_list, labels_list

    def train(self, pth_save_path):
        """Train for ``self.epoch`` epochs, checkpointing the best weights.

        :param pth_save_path: file path for ``torch.save`` of the best
            (lowest validation log-loss) ``state_dict``.
        :return: tuple ``(min_val_loss, max_auc)`` — best validation
            log-loss and best validation AUC seen across epochs.
        """
        min_val_loss = float('inf')
        max_auc = 0
        min_epoch = 0  # epoch index of the best checkpoint (for debugging)
        iters = len(self.train_data_loader)
        for epoch_idx in range(1, self.epoch + 1):
            print('------------------%i--------------------' % epoch_idx)
            train_loss = self._run_train_epoch(epoch_idx, iters)
            print(train_loss)
            val_loss, pres_list, labels_list = self._run_validation()
            val_auc = metrics.roc_auc_score(labels_list, pres_list, multi_class='ovo')
            log_loss = metrics.log_loss(labels_list, pres_list)
            print('Val loss:', val_loss)
            print('Val Log loss:', log_loss)
            print('Val AUC:', val_auc)
            # BUGFIX: max_auc was initialized and returned but never updated
            # (its update was commented out), so callers always received 0.
            max_auc = max(max_auc, val_auc)
            if log_loss < min_val_loss:
                min_val_loss = log_loss
                min_epoch = epoch_idx
                torch.save(self.model.state_dict(), pth_save_path)
        return min_val_loss, max_auc


#
# for i,(report_id, words), in enumerate(testing_data_list):
#     result = []
#     for j in range(len(label_list[i])):
#         result.append(str(predictions[j][i]))
#     print(report_id+'|,|'+' '.join(result))