import torch

from sklearn import metrics
from utils.metrics import *
from tqdm import tqdm
import os


# BATCH_SIZE= 128

class Trainer():
    """Train/validate loop for a single-label classification model.

    After every epoch it computes accuracy, macro precision/recall/F1,
    weighted F1 and AUC on the validation set, logs them to an optional
    TensorBoard writer, and checkpoints the model whenever one tracked
    metric improves (at most one checkpoint per epoch, by priority).
    """

    def __init__(self, model, optimizer, criterion, train_data_loader, val_data_loader, device, epoch=100,
                 lr_scheduler=None, writer=None):
        """
        Args:
            model: the network; may be wrapped in (Distributed)DataParallel.
            optimizer: torch optimizer stepped once per batch.
            criterion: loss function; if None the model is expected to
                compute its own loss and return a (loss, features) tuple.
            train_data_loader / val_data_loader: yield (inputs, labels, idx).
            device: torch device string or object for tensors.
            epoch: total number of epochs to run.
            lr_scheduler: optional scheduler stepped per batch with a
                fractional epoch (e.g. CosineAnnealingWarmRestarts).
            writer: optional TensorBoard SummaryWriter.
        """
        self.train_data_loader = train_data_loader
        self.val_data_loader = val_data_loader
        self.model = model
        self.epoch = epoch
        self.optimizer = optimizer
        self.criterion = criterion
        self.lr_scheduler = lr_scheduler
        self.device = device
        self.writer = writer

    def _save_checkpoint(self, pth_save_path, file_name):
        """Save the (unwrapped) model weights; return the checkpoint path."""
        # Unwrap DataParallel/DDP if present so the weights can later be
        # loaded into a bare model. The original unconditionally accessed
        # `self.model.module`, which raised AttributeError for plain models.
        model = self.model.module if hasattr(self.model, 'module') else self.model
        ckpt_name = os.path.join(pth_save_path, file_name)
        torch.save(model.state_dict(), ckpt_name)
        return ckpt_name

    def train(self, pth_save_path, fold=None):
        """Run the full training loop.

        Args:
            pth_save_path: directory into which checkpoints are written.
            fold: optional fold index, used only for console output.

        Returns:
            (min_val_loss, max_auc): best validation loss and AUC seen
            after the warm-up period (epoch >= 5). Either may keep its
            initial value if never improved upon.
        """
        max_f1 = 0
        max_auc = 0
        max_wf1 = 0
        min_train_loss = float('inf')
        min_val_loss = float('inf')
        iters = len(self.train_data_loader)

        for i_epoch in range(1, self.epoch + 1):
            if fold is not None:
                print('-----------------Fold %i: %i/%i--------------------' % (fold, i_epoch, self.epoch))
            else:
                print('-----------------%i/%i--------------------' % (i_epoch, self.epoch))
            train_loss = 0.0
            self.model.train()

            # int64 accumulator: the original int16 silently overflowed for
            # datasets with more than 32767 samples.
            correct_count = torch.tensor(0, dtype=torch.int64).to(self.device)
            for i, training_data in enumerate(tqdm(self.train_data_loader)):
                inputs, labels, idx = training_data
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)
                self.optimizer.zero_grad()
                if self.criterion is not None:
                    output, x1 = self.model(inputs)
                    loss = self.criterion(output, labels)
                else:
                    # Model computes its own loss.
                    # NOTE(review): on this path `output` is a (loss, features)
                    # tuple, so the `argmax(output.data, ...)` below would
                    # fail — confirm this branch is actually exercised.
                    output = self.model(inputs, labels)
                    loss, text_fea = output

                loss.backward()
                self.optimizer.step()
                if self.lr_scheduler is not None:
                    # Fractional 0-based epoch for schedulers such as
                    # CosineAnnealingWarmRestarts. The original passed
                    # `self.epoch` (the constant total), which froze the
                    # schedule at one point for the whole run.
                    self.lr_scheduler.step(i_epoch - 1 + i / iters)
                train_loss += loss.item()
                pred = torch.argmax(output.data, dim=1)
                result = labels == pred
                correct_count = correct_count + torch.sum(result)
            # presumably dataset.index holds one entry per sample — TODO confirm
            n_data = self.train_data_loader.dataset.index.shape[0]
            acc = correct_count.data.float() / n_data
            print('+++Training Loss: %.3f, Training Accuracy: %i/%i=%f' % (train_loss, correct_count.data, n_data, acc))

            self.model.eval()
            val_loss = 0.0
            correct_count = torch.tensor(0, dtype=torch.int64).to(self.device)
            output_list = []   # per-sample softmax probabilities
            pres_list = []     # per-sample predicted class indices
            labels_list = []   # per-sample ground-truth labels
            for i, val_data in enumerate(tqdm(self.val_data_loader)):
                inputs, labels, idx = val_data
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)
                with torch.no_grad():
                    output, x1 = self.model(inputs)
                    if self.criterion is not None:
                        loss = self.criterion(output, labels)
                    else:
                        loss, text_fea = output
                    output = torch.softmax(output.data, dim=1)
                    prediction = torch.argmax(output, dim=1)
                    output_list += output.data.detach().cpu().numpy().tolist()
                    pres_list += prediction.detach().cpu().numpy().tolist()
                    labels_list += labels.detach().cpu().numpy().tolist()
                    result = labels == prediction
                    correct_count = correct_count + torch.sum(result).int()

                val_loss += loss.item()
            val_n_data = self.val_data_loader.dataset.index.shape[0]
            acc = correct_count.data.float() / val_n_data
            print('+++Validation Loss: %.3f, Validation Accuracy: %i/%i=%f' % (
                val_loss, correct_count.data, val_n_data, acc))

            avg_precision, avg_recall, avg_f1, weighted_f1 = get_classification_report(labels_list, pres_list)
            auc = get_auc(labels_list, output_list)

            # Log metrics every epoch. (Previously this sat below the
            # warm-up `continue` and silently skipped the first 4 epochs.)
            if self.writer is not None:
                self.writer.add_scalar('Testing Acc', acc.item(), global_step=i_epoch)
                self.writer.add_scalar('Test Loss', val_loss, global_step=i_epoch)
                self.writer.add_scalar('Avg Precision', avg_precision, global_step=i_epoch)
                self.writer.add_scalar('Avg Recall', avg_recall, global_step=i_epoch)
                self.writer.add_scalar('Avg F1', avg_f1, global_step=i_epoch)
                self.writer.add_scalar('AUC', auc, global_step=i_epoch)
                self.writer.add_scalar('Weighted F1', weighted_f1, global_step=i_epoch)

            # Warm-up: no checkpointing during the first 4 epochs.
            if i_epoch < 5:
                continue

            # Save at most one checkpoint per epoch, by metric priority:
            # macro F1 > AUC > weighted F1 > train loss > val loss.
            # A best-metric counter is only updated when its branch fires,
            # matching the original `has_saved` flag semantics.
            ckpt_name = None
            if avg_f1 >= max_f1:
                max_f1 = avg_f1
                ckpt_name = self._save_checkpoint(pth_save_path, "ckpt_%d_f1_%.3f.pth" % (i_epoch, avg_f1))
            elif auc >= max_auc:
                max_auc = auc
                ckpt_name = self._save_checkpoint(pth_save_path, "ckpt_%d_auc_%.3f.pth" % (i_epoch, auc))
            elif weighted_f1 >= max_wf1:
                max_wf1 = weighted_f1
                ckpt_name = self._save_checkpoint(pth_save_path, "ckpt_%d_wf1_%.3f.pth" % (i_epoch, weighted_f1))
            elif train_loss <= min_train_loss:
                min_train_loss = train_loss
                ckpt_name = self._save_checkpoint(pth_save_path, "min_train_loss.pth")
            elif val_loss <= min_val_loss:
                min_val_loss = val_loss
                ckpt_name = self._save_checkpoint(pth_save_path, "min_val_loss.pth")
            if ckpt_name is not None:
                print('Saved to %s done' % ckpt_name)

        return min_val_loss, max_auc
