# -*- coding: utf-8 -*-

import sys
import torch
from tqdm import tqdm as tqdm
import segmentation_models_pytorch as smp
# NOTE: AverageValueMeter is taken from segmentation_models_pytorch (smp.utils.meter)


class TCSMEpoch:
    """Train/validation epoch runner for a TCSM mean-teacher segmentation model.

    The wrapped ``model`` is expected to expose ``student_model``,
    ``teacher_model``, ``smoothing`` (EMA decay cap) and ``teacher_device``,
    and its forward pass to return a ``(student_pred, teacher_pred,
    trans_args)`` triple -- assumed from usage below, TODO confirm against
    the TCSM model definition.
    """

    def __init__(self, model, loss, optimizer, metrics, verbose=True):
        self.model = model            # TCSM wrapper holding student/teacher nets
        self.loss = loss              # consistency loss callable (logged via .__name__)
        self.optimizer = optimizer    # optimizer over the student parameters
        self.metrics = metrics        # iterable of metric callables (pred, target)
        self.verbose = verbose        # show tqdm progress bar / postfix logs
        self.update_ema_count = 0     # number of EMA teacher updates performed

    def _format_logs(self, logs):
        """Format a ``{name: value}`` dict as ``'name - value, ...'`` for tqdm."""
        str_logs = ['{} - {:.4}'.format(k, v) for k, v in logs.items()]
        return ', '.join(str_logs)

    def update_ema_teacher(self):
        """Update teacher weights as an EMA of the student weights.

        The decay ramps up as ``1 - 1/(step + 1)`` and is capped at
        ``model.smoothing`` -- the standard mean-teacher warm-up schedule,
        so the first update copies the student weights verbatim (alpha=0).
        """
        alpha = min(1 - 1 / (self.update_ema_count + 1), self.model.smoothing)
        for ema_param, param in zip(self.model.teacher_model.parameters(),
                                    self.model.student_model.parameters()):
            param = param.to(self.model.teacher_device)
            # ema = alpha * ema + (1 - alpha) * student.
            # Fixed: the deprecated positional add_(alpha, other) overload was
            # removed from recent PyTorch; pass the scale via the `alpha` kwarg.
            ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
        self.update_ema_count += 1

    def batch_update(self, labeled, target, unlabeled, epoch):
        """One optimization step on a labeled + unlabeled batch pair.

        Returns ``(loss, labeled_student_pred)`` so the caller can log the
        loss and compute metrics against the student prediction.
        """
        self.optimizer.zero_grad()
        labeled_student_pred, labeled_teacher_pred, labeled_trans_args = self.model(labeled)
        unlabeled_student_pred, unlabeled_teacher_pred, unlabeled_trans_args = self.model(unlabeled)

        loss = self.loss(
            labeled_student_pred,
            labeled_teacher_pred,
            unlabeled_student_pred,
            unlabeled_teacher_pred,
            target,
            labeled_trans_args,
            unlabeled_trans_args,
            epoch
        )
        loss.backward()

        self.optimizer.step()
        # Teacher follows the student after every optimizer step.
        self.update_ema_teacher()

        return loss, labeled_student_pred

    def batch_predict(self, labeled, target):
        """Forward a labeled batch in inference mode.

        Returns ``(loss, labeled_pred)`` where the loss is a plain
        cross-entropy against ``target[:, 0, ...]`` (mask assumed to carry the
        class index in channel 0 -- TODO confirm against the dataset).
        NOTE(review): if ``model.predict`` already applies a softmax
        activation, CrossEntropyLoss (which expects logits) makes this a
        monitoring value only, not a calibrated loss -- verify.
        """
        labeled_pred = self.model.predict(labeled)
        target = target.to(labeled_pred.device).long()
        with torch.no_grad():
            loss = torch.nn.CrossEntropyLoss()(labeled_pred, target[:, 0, ...])

        return loss, labeled_pred

    def train(self, labeled_loader, unlabeled_loader, epoch):
        """Run one training epoch; returns the final averaged log dict."""
        self.model.train()

        logs = {}
        loss_meter = smp.utils.meter.AverageValueMeter()
        metrics_meters = {metric.__name__: smp.utils.meter.AverageValueMeter() for metric in self.metrics}

        with tqdm(labeled_loader, desc='train', file=sys.stdout, disable=not (self.verbose)) as labeled_loader_iter:
            unlabeled_loader_iter = iter(unlabeled_loader)
            for labeled, target in labeled_loader_iter:

                try:
                    unlabeled = next(unlabeled_loader_iter)
                except StopIteration:
                    # Fixed: was `.next()`, a Python-2-only method that raises
                    # AttributeError under Python 3; also restart the unlabeled
                    # stream when it is shorter than the labeled one instead of
                    # crashing mid-epoch.
                    unlabeled_loader_iter = iter(unlabeled_loader)
                    unlabeled = next(unlabeled_loader_iter)
                loss, pred = self.batch_update(labeled, target, unlabeled, epoch)

                # update loss logs
                loss_value = loss.cpu().detach().numpy()
                loss_meter.add(loss_value)
                loss_logs = {self.loss.__name__: loss_meter.mean}
                logs.update(loss_logs)

                # update metrics logs (device move hoisted out of the loop —
                # it is invariant across metrics)
                target = target.to(pred.device)
                for metric_fn in self.metrics:
                    metric_value = metric_fn(pred, target).cpu().detach().numpy()
                    metrics_meters[metric_fn.__name__].add(metric_value)
                metrics_logs = {k: v.mean for k, v in metrics_meters.items()}
                logs.update(metrics_logs)

                if self.verbose:
                    s = self._format_logs(logs)
                    labeled_loader_iter.set_postfix_str(s)

        return logs

    def valid(self, labeled_loader):
        """Run one validation epoch; returns the final averaged log dict."""
        self.model.eval()

        logs = {}
        loss_meter = smp.utils.meter.AverageValueMeter()
        metrics_meters = {metric.__name__: smp.utils.meter.AverageValueMeter() for metric in self.metrics}

        with tqdm(labeled_loader, desc='valid', file=sys.stdout, disable=not (self.verbose)) as labeled_loader_iter:
            for labeled, target in labeled_loader_iter:

                loss, pred = self.batch_predict(labeled, target)

                # update loss logs
                loss_value = loss.cpu().detach().numpy()
                loss_meter.add(loss_value)
                loss_logs = {self.loss.__name__: loss_meter.mean}
                logs.update(loss_logs)

                # update metrics logs (device move hoisted out of the loop —
                # it is invariant across metrics)
                target = target.to(pred.device)
                for metric_fn in self.metrics:
                    metric_value = metric_fn(pred, target).cpu().detach().numpy()
                    metrics_meters[metric_fn.__name__].add(metric_value)
                metrics_logs = {k: v.mean for k, v in metrics_meters.items()}
                logs.update(metrics_logs)

                if self.verbose:
                    s = self._format_logs(logs)
                    labeled_loader_iter.set_postfix_str(s)

        return logs


# if __name__ == "__main__":
#     from CC_CCI_dataset import get_TCSM_dataloader
#     from model import TCSM
#     from loss import TCSMLoss
    
#     IMAGE_DIR = "D:\data\ct_lesion_seg\image"
#     MASK_DIR = "D:\data\ct_lesion_seg\mask"
#     train_loader, valid_loader, unlabeled_loader = get_TCSM_dataloader(IMAGE_DIR, MASK_DIR, 5, device='cuda')
    
#     student = smp.DeepLabV3Plus(encoder_weights=None, in_channels=1, classes=3, activation='softmax2d')
#     teacher = smp.DeepLabV3Plus(encoder_weights=None, in_channels=1, classes=3, activation='softmax2d')
#     tcsm = TCSM(student, teacher)
    
#     loss = TCSMLoss()
#     optimizer = torch.optim.Adam(params=tcsm.parameters())
    
#     tcsmEpoch = TCSMEpoch(tcsm, loss, optimizer, [], verbose = True)
    
#     for i in range(1):
#         tcsmEpoch.train(train_loader, unlabeled_loader, i)
#         tcsmEpoch.valid(valid_loader)