from collections import OrderedDict
import numpy as np
import torch
from experiment import exp_metrics
from model import loss_module, segment_module
from tqdm import tqdm

import logging
logging.basicConfig(format='> %(asctime)s | %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)

class BaseRunner(object):
    """Abstract experiment runner holding shared training/evaluation state.

    Concrete subclasses implement the actual training loop and evaluation;
    this base only stores the common handles and raises on direct use.
    """

    def __init__(self,
                 model,
                 dataset,
                 dataloader,
                 device,
                 loss_module,
                 optimizer,
                 args
                 ):
        # Handles shared by every concrete runner.
        self.model = model
        self.dataset = dataset
        self.dataloader = dataloader
        self.device = device
        self.loss_module = loss_module
        self.optimizer = optimizer
        self.args = args
        # Subclasses write per-epoch results into this ordered mapping.
        self.epoch_metrics = OrderedDict()

    def train(self, epoch_num=None):
        """Must be overridden by a concrete runner."""
        raise NotImplementedError('Please override in child class')

    def evaluate(self):
        """Must be overridden by a concrete runner."""
        raise NotImplementedError('Please override in child class')

def pred_aggr_regression(model, x, x_seg_all, x_seg_reduce, padding, args, pred_type='type0'):
    """Predict per sample with segment features appended, then stack results.

    For each sample ``x[j]``, the matching reduced segment features are
    concatenated along the time axis, a batch dimension of 1 is added, and the
    model is run; the per-sample predictions are concatenated on dim 0.

    ``x_seg_all``, ``args`` and ``pred_type`` are accepted for interface
    compatibility but are not used here.
    """
    outputs = []
    for idx, seg_feat in enumerate(x_seg_reduce):
        # Raw sequence followed by its reduced segment features, as one batch of 1.
        sample = torch.cat((x[idx], seg_feat), dim=0).unsqueeze(dim=0)
        # The model returns a tuple; index 1 holds the prediction head output.
        outputs.append(model(sample.float(), key_padding_masks=padding, src_masks=None)[1])
    return torch.cat(outputs, dim=0)

class RegressionRunner(BaseRunner):
    """Runner for regression: segment-augmented prediction, metrics reported
    after inverse-transforming targets/predictions to their original scale."""

    def __init__(self, *args, **kwargs):
        super(RegressionRunner, self).__init__(*args, **kwargs)
        self.analyzer = exp_metrics.AnalyzerMetric(task='regression')
        # Maps scaled targets/predictions back to original units in evaluate().
        self.target_scaler = self.dataset.target_scaler

    def train_epoch(self, epoch_num=None):
        """Run one training epoch.

        Returns ``self.epoch_metrics`` updated with ``epoch_num`` and the
        per-sample mean loss over the whole epoch.
        """
        self.model = self.model.train()

        epoch_sum_loss = 0  # sum of per-sample losses over the epoch
        epoch_samples = 0   # total samples seen in the epoch
        for i, batch in enumerate(self.dataloader):
            x, targets, padding, info = batch  # X, targets, the_padding_mask, info
            x = x.to(self.device)
            targets = targets.to(self.device)
            padding = padding.to(self.device)
            x_seg_all, x_seg_reduce = segment_module.calculate_segment_feature(x, args=self.args)
            pred = pred_aggr_regression(model=self.model, x=x, x_seg_all=x_seg_all,
                                        x_seg_reduce=x_seg_reduce, padding=padding, args=self.args)
            batch_loss = self.loss_module(pred, targets)
            loss = torch.sum(batch_loss) / len(targets)  # batch-mean loss
            self.optimizer.zero_grad()
            # NOTE(review): retain_graph=True kept from the original; confirm the
            # graph is really reused across backward calls, otherwise drop it.
            loss.backward(retain_graph=True)
            self.optimizer.step()
            with torch.no_grad():
                # Bug fix: weight the batch-mean loss by its batch size so the
                # epoch average is a true per-sample mean (the old code divided
                # a sum of batch means by the total sample count).
                epoch_sum_loss += loss.item() * x.shape[0]
                epoch_samples += x.shape[0]
        epoch_mean_loss = epoch_sum_loss / epoch_samples
        self.epoch_metrics['epoch_num'] = epoch_num
        self.epoch_metrics['epoch_mean_loss'] = epoch_mean_loss
        torch.cuda.empty_cache()
        return self.epoch_metrics

    def evaluate(self):
        """Evaluate over the runner's dataloader.

        Returns ``(epoch_metrics, targets_real, predictions_real)`` where the
        last two are numpy arrays inverse-transformed to the original target
        scale.
        """
        self.model = self.model.eval()

        per_batch = {'targets': [],
                     'predictions': [],
                     'batch_metric': [],
                     'info': []}
        for i, batch in enumerate(self.dataloader):
            x, targets, padding, info = batch  # X, targets, the_padding_mask, info
            x = x.to(self.device)
            targets = targets.to(self.device)
            padding = padding.to(self.device)
            x_seg_all, x_seg_reduce = segment_module.calculate_segment_feature(x, args=self.args)
            pred = pred_aggr_regression(model=self.model, x=x, x_seg_all=x_seg_all,
                                        x_seg_reduce=x_seg_reduce, padding=padding, args=self.args)
            batch_metrics = self.analyzer.get_metrics(targets, pred)  # metrics of this batch
            with torch.no_grad():
                per_batch['targets'].append(targets.cpu())
                per_batch['predictions'].append(pred.cpu())
                per_batch['batch_metric'].append(batch_metrics)
                per_batch['info'].append(info)

        # Aggregate over all batches, then undo the target scaling so the final
        # metrics are computed in real units.
        targets_ = torch.cat(per_batch['targets'], dim=0)
        predictions_ = torch.cat(per_batch['predictions'], dim=0)
        targets_real = self.target_scaler.inverse_transform(targets_.numpy())
        predictions_real = self.target_scaler.inverse_transform(predictions_.numpy())

        final_metric = self.analyzer.get_metrics(y_true=targets_real, y_pred=predictions_real)
        self.epoch_metrics['epoch_metrics'] = final_metric
        torch.cuda.empty_cache()

        return self.epoch_metrics, targets_real, predictions_real

class ClassificationRunner(BaseRunner):
    """Runner for supervised classification using the model's direct output."""

    def __init__(self, *args, **kwargs):
        super(ClassificationRunner, self).__init__(*args, **kwargs)
        self.analyzer = exp_metrics.AnalyzerMetric(task='classification')

    def train_epoch(self, epoch_num=None):
        """Run one training epoch.

        Returns ``self.epoch_metrics`` updated with ``epoch_num`` and the
        per-sample mean loss over the whole epoch.
        """
        self.model = self.model.train()

        epoch_sum_loss = 0  # sum of per-sample losses over the epoch
        epoch_samples = 0   # total samples seen in the epoch

        for i, batch in enumerate(tqdm(self.dataloader)):
            x, targets, padding, info = batch  # X, targets, the_padding_mask, info
            x = x.to(self.device)
            targets = targets.to(self.device)
            padding = padding.to(self.device)
            # Model returns a tuple; index 1 is the prediction head output.
            pred = self.model(x.float(), key_padding_masks=padding, src_masks=None)[1]
            batch_loss = self.loss_module(pred, targets)
            loss = torch.sum(batch_loss) / x.shape[0]  # batch-mean loss

            self.optimizer.zero_grad()
            # NOTE(review): retain_graph=True kept from the original; confirm it
            # is actually required, otherwise it wastes memory.
            loss.backward(retain_graph=True)
            self.optimizer.step()

            with torch.no_grad():
                # Bug fix: weight the batch-mean loss by its batch size so the
                # epoch average is a true per-sample mean (the old code divided
                # a sum of batch means by the total sample count).
                epoch_sum_loss += loss.item() * x.shape[0]
                epoch_samples += x.shape[0]

        epoch_mean_loss = epoch_sum_loss / epoch_samples
        self.epoch_metrics['epoch_num'] = epoch_num
        self.epoch_metrics['epoch_mean_loss'] = epoch_mean_loss
        torch.cuda.empty_cache()

        return self.epoch_metrics

    def evaluate(self):
        """Evaluate over the runner's dataloader.

        Returns ``(epoch_metrics, targets_, predictions_)`` with targets and
        predictions concatenated over all batches (CPU tensors).
        """
        logger.info('Evaluation on Supervised Experiment')
        self.model = self.model.eval()

        per_batch = {'targets': [],
                     'predictions': [],
                     'batch_metric': [],
                     'info': []}
        for i, batch in enumerate(self.dataloader):
            x, targets, padding, info = batch  # X, targets, the_padding_mask, info
            x = x.to(self.device)
            targets = targets.to(self.device)
            padding = padding.to(self.device)
            pred = self.model(x.float(), key_padding_masks=padding, src_masks=None)[1]
            batch_metrics = self.analyzer.get_metrics(targets, pred)  # metrics of this batch
            with torch.no_grad():
                per_batch['targets'].append(targets.cpu())
                per_batch['predictions'].append(pred.cpu())
                per_batch['batch_metric'].append(batch_metrics)
                per_batch['info'].append(info)

        # Aggregate over all batches before computing the final metric.
        targets_ = torch.cat(per_batch['targets'], dim=0)
        predictions_ = torch.cat(per_batch['predictions'], dim=0)

        final_metric = self.analyzer.get_metrics(y_true=targets_, y_pred=predictions_)
        self.epoch_metrics['epoch_metrics'] = final_metric
        torch.cuda.empty_cache()

        return self.epoch_metrics, targets_, predictions_

###########################################################################
###########################################################################

def pred_aggr_unsupervised(model, x, x_seg_all, x_seg_reduce, padding, args, pred_type='type0'):
    """Per-sample segment-augmented prediction (unsupervised variant).

    Identical aggregation scheme to ``pred_aggr_regression``: each sample is
    concatenated with its reduced segment features, predicted individually,
    and the results are stacked on dim 0.

    ``x_seg_all``, ``args`` and ``pred_type`` are accepted for interface
    compatibility but are not used here.
    """
    outputs = []
    for idx, seg_feat in enumerate(x_seg_reduce):
        sample = torch.cat((x[idx], seg_feat), dim=0).unsqueeze(dim=0)
        # The model returns a tuple; index 1 holds the prediction head output.
        outputs.append(model(sample.float(), key_padding_masks=padding, src_masks=None)[1])
    return torch.cat(outputs, dim=0)


def split_contrast_data(model, x, x_seg_all, x_seg_reduce, padding, args):
    """Build positive and negative embeddings for contrastive training.

    Positive: embedding of the full batch ``x``.
    Negative: embedding of each reduced segment feature (run one at a time
    with a leading batch dim of 1), concatenated on dim 0.

    The model output is a tuple ``(x, pred, z)``; index 2 is the embedding.
    ``x_seg_all`` and ``args`` are unused but kept for interface parity.
    Returns ``(z_pos, z_neg)``.
    """
    # Bug fix: removed unused local `batch = x[0]` (dead statement).
    z_pos = model(x.float(), key_padding_masks=padding, src_masks=None)[2]
    z_neg = []
    for j, feat in enumerate(x_seg_reduce):
        feat = feat.unsqueeze(dim=0)
        z_ = model(feat.float(), key_padding_masks=padding, src_masks=None)[2]
        z_neg.append(z_)
    z_neg = torch.cat(z_neg, dim=0)
    return z_pos, z_neg

def split_contrast_data_PU(model, x, x_seg_all, x_seg_reduce, padding, args):
    """Build anchor/positive/negative embeddings for the PU triplet loss.

    The model output is a tuple ``(x, pred, z, z_contrast)``; index 3 is used.
    The full-batch embedding serves as both the anchor and the negative set
    (the same tensor is returned twice). Positives come from each reduced
    segment feature, concatenated on dim 0.

    ``x_seg_all`` and ``args`` are unused but kept for interface parity.
    Returns ``(z_t, z_pos, z_neg)``.
    """
    anchor = model(x.float(), key_padding_masks=padding, src_masks=None)[3]
    positives = [
        model(seg.unsqueeze(dim=0).float(), key_padding_masks=padding, src_masks=None)[3]
        for seg in x_seg_reduce
    ]
    z_pos = torch.cat(positives, dim=0)
    # The anchor tensor doubles as the negative set for the caller.
    return anchor, z_pos, anchor

class UnsupervisedClassificationRunner(BaseRunner):
    """Runner that alternates contrastive (PU triplet) and supervised updates.

    Epochs where ``epoch_num % 8 == 0`` train with the unsupervised triplet
    loss on segment features; all other epochs use the supervised loss on the
    model's direct predictions.
    """

    def __init__(self, *args, **kwargs):
        super(UnsupervisedClassificationRunner, self).__init__(*args, **kwargs)
        self.analyzer = exp_metrics.AnalyzerMetric(task='classification')
        # Binary loss consumed by the PU triplet objective.
        self.lossf = torch.nn.BCEWithLogitsLoss(reduction='mean')

    def train_epoch(self, epoch_num=None):
        """Run one training epoch (unsupervised or supervised by epoch index).

        Note: requires an integer ``epoch_num`` (used in ``epoch_num % 8``).
        Returns ``self.epoch_metrics`` updated with ``epoch_num`` and the
        per-sample mean loss over the whole epoch.
        """
        self.model = self.model.train()

        epoch_sum_loss = 0  # sum of per-sample losses over the epoch
        epoch_samples = 0   # total samples seen in the epoch

        for i, batch in enumerate(self.dataloader):
            x, targets, padding, info = batch  # X, targets, the_padding_mask, info
            x = x.to(self.device)
            targets = targets.to(self.device)
            padding = padding.to(self.device)
            if epoch_num % 8 == 0:
                # Contrastive phase: anchor/positive/negative embeddings from
                # segment features, scored with the PU triplet loss.
                logger.info('Current Loss -> Unsupervised')
                x_seg_all, x_seg_reduce = segment_module.calculate_segment_feature(x, args=self.args)
                z_ref, z_pos, z_neg = split_contrast_data_PU(model=self.model, x=x,
                                                             x_seg_all=x_seg_all,
                                                             x_seg_reduce=x_seg_reduce,
                                                             padding=padding, args=self.args)
                loss_ = loss_module.triplet_loss_pu(z_ref, z_pos, z_neg, self.lossf, self.args)
            else:
                # Supervised phase: direct prediction head output (tuple index 1).
                logger.info('Current Loss -> Supervised')
                pred = self.model(x.float(), key_padding_masks=padding, src_masks=None)[1]
                loss_ = self.loss_module(pred, targets)
            loss = torch.sum(loss_) / x.shape[0]  # batch-mean loss
            self.optimizer.zero_grad()
            # NOTE(review): retain_graph=True kept from the original; confirm it
            # is actually required, otherwise it wastes memory.
            loss.backward(retain_graph=True)
            self.optimizer.step()

            with torch.no_grad():
                # Bug fix: weight the batch-mean loss by its batch size so the
                # epoch average is a true per-sample mean (the old code divided
                # a sum of batch means by the total sample count).
                epoch_sum_loss += loss.item() * x.shape[0]
                epoch_samples += x.shape[0]

        epoch_mean_loss = epoch_sum_loss / epoch_samples
        self.epoch_metrics['epoch_num'] = epoch_num
        self.epoch_metrics['epoch_mean_loss'] = epoch_mean_loss
        torch.cuda.empty_cache()

        return self.epoch_metrics

    def evaluate(self):
        """Evaluate with direct model predictions.

        Returns ``(epoch_metrics, targets_, predictions_)`` with targets and
        predictions concatenated over all batches (CPU tensors).
        """
        logger.info('Evaluation on Unsupervised Experiment')
        self.model = self.model.eval()

        per_batch = {'targets': [],
                     'predictions': [],
                     'batch_metric': [],
                     'info': []}
        for i, batch in enumerate(self.dataloader):
            x, targets, padding, info = batch  # X, targets, the_padding_mask, info
            x = x.to(self.device)
            targets = targets.to(self.device)
            padding = padding.to(self.device)
            pred = self.model(x.float(), key_padding_masks=padding, src_masks=None)[1]
            batch_metrics = self.analyzer.get_metrics(targets, pred)  # metrics of this batch
            with torch.no_grad():
                per_batch['targets'].append(targets.cpu())
                per_batch['predictions'].append(pred.cpu())
                per_batch['batch_metric'].append(batch_metrics)
                per_batch['info'].append(info)

        # Aggregate over all batches before computing the final metric.
        targets_ = torch.cat(per_batch['targets'], dim=0)
        predictions_ = torch.cat(per_batch['predictions'], dim=0)

        final_metric = self.analyzer.get_metrics(y_true=targets_, y_pred=predictions_)
        self.epoch_metrics['epoch_metrics'] = final_metric
        torch.cuda.empty_cache()

        return self.epoch_metrics, targets_, predictions_

'''elif self.args.exp_type == 'pretrain' and self.args.use_pretrain:
# print('----> Start Linear Learning')
pred = self.model(x.float())[1]
# pred = pred_aggr_unsupervised(model=self.model, x=x, x_seg_all=x_seg_all, x_seg_reduce=x_seg_reduce, padding=padding, args=self.args)
batch_loss = self.loss_module(pred, targets)'''