import os
import time

import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data.distributed
from tensorboardX import SummaryWriter
from torch.cuda.amp import GradScaler, autocast

from dataloader import get_loader
from models import SwinUNETR
from losses import LossBraTS
from metrics import Metrics


class AverageMeter(object):
    """Track the latest value and a running weighted average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0    # most recent value passed to update()
        self.avg = 0    # running weighted average
        self.sum = 0    # weighted sum of all values
        self.count = 0  # total weight (e.g. number of samples)

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` (e.g. batch size) and refresh the average.

        BUG FIX: the original computed the average with ``np.where(count > 0, ...)``,
        which (a) yields a 0-d numpy array instead of a plain scalar and (b) eagerly
        evaluates ``sum / count`` even when ``count == 0``, so the "guard" could never
        actually prevent a ZeroDivisionError. A plain conditional does both correctly.
        """
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count if self.count > 0 else self.sum
        
        
class Evaluater():
    """Evaluation (and training) harness for a SwinUNETR segmentation model.

    NOTE(review): despite its name, this class also carries training logic that
    reads attributes never created here (``self.train_loader``, ``self.val_loader``,
    ``self.optimizer``, ``self.scheduler``, ``self.loss``, ``self.scaler``,
    ``self.writer``, ``self.save_checkpoint``) — presumably supplied by a subclass
    or assigned externally; confirm before calling train()/load_checkpoint().
    """

    def __init__(self, args) -> None:
        # args: parsed configuration namespace (paths, sizes, hyper-parameters).
        self.args = args
        self.trainer_initialized = False

    def initial_trainer(self):
        """Build the dataloader, model and metrics, then mark initialization done."""
        self.trainer_initialized = False
        self.config_dataset()
        self.init_model()
        self.config_metrics()
        self.trainer_initialized = True

    def config_dataset(self):
        """Create the test dataloader from the dataset JSON described in args."""
        self.test_loader = get_loader(
            datalist_json=self.args.data_json,
            batch_size=self.args.batch_size,
            num_works=self.args.workers,
            phase='test'
        )
        # BUG FIX: the original format string had three placeholders (train/val/test)
        # but received a single argument, raising IndexError at runtime. Only the
        # test loader is built here, so report only the test-set size.
        print('Test data number: {}'.format(len(self.test_loader) * self.args.batch_size))

    def config_metrics(self):
        """Instantiate the segmentation metrics (3 classes — presumably the BraTS
        TC/WT/ET regions; confirm against the Metrics implementation)."""
        self.metrics = Metrics(num_classes=3)
        print("Config metrics")

    def init_model(self):
        """Build the SwinUNETR model on GPU and report its trainable parameter count."""
        self.model = SwinUNETR(
            img_size=self.args.img_size,
            in_channels=self.args.in_channels,
            out_channels=self.args.out_channels,
            spatial_dims=self.args.spatial_dims
        ).to('cuda')

        pytorch_model_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
        print("Model parameters count", pytorch_model_params)

    def load_checkpoint(self):
        """Load model (and, when present, optimizer/scheduler) state from
        ``args.checkpoint``, stripping any ``backbone.`` key prefix."""
        checkpoint = torch.load(self.args.checkpoint, map_location="cpu")
        from collections import OrderedDict

        def _strip_prefix(state):
            # Checkpoints saved from a wrapped model carry a "backbone." prefix;
            # remove it so keys match this model's parameter names.
            return OrderedDict((k.replace("backbone.", ""), v) for k, v in state.items())

        self.model.load_state_dict(_strip_prefix(checkpoint["state_dict"]), strict=False)
        print("=> loaded model checkpoint")

        # BUG FIX: start_epoch/best_acc were referenced unconditionally in the final
        # print but only bound when present in the checkpoint (possible NameError).
        start_epoch = checkpoint.get("epoch", 0)
        best_acc = checkpoint.get("best_acc", 0.0)

        if "optimizer" in checkpoint:
            # BUG FIX: the original kept appending into the dict that already held
            # the model weights, so the optimizer received model keys as well.
            # Build a fresh dict for each component.
            self.optimizer.load_state_dict(_strip_prefix(checkpoint["optimizer"]))
            # Move optimizer state tensors onto the GPU to match the model device.
            for state in self.optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda()
            print("=> loaded optimizer checkpoint")
        if "scheduler" in checkpoint:
            self.scheduler.load_state_dict(_strip_prefix(checkpoint["scheduler"]))
            self.scheduler.step(epoch=start_epoch)
            print("=> loaded scheduler checkpoint")
        print("=> loaded checkpoint '{}' (epoch {}) (bestacc {})".format(self.args.checkpoint, start_epoch, best_acc))

    def train_one_epoch(self):
        """Run one optimization epoch over ``self.train_loader``.

        Returns:
            The batch-size-weighted mean training loss for the epoch.
        """
        self.model.train()
        self.optimizer.zero_grad()
        run_loss = AverageMeter()

        for idx, batch_data in enumerate(self.train_loader):
            img, label = batch_data["image"].cuda(), batch_data['label'].cuda()

            label = label.to(torch.float32)
            img = img.to(torch.float32)

            # Mixed precision forward pass when args.amp is enabled.
            with autocast(enabled=self.args.amp):
                y = self.model(img)
                loss = self.loss(y, label)

            self.optimizer.zero_grad()
            if self.args.amp:
                # Scale the loss to avoid fp16 gradient underflow.
                self.scaler.scale(loss).backward()
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                loss.backward()
                self.optimizer.step()

            run_loss.update(loss.item(), n=self.args.batch_size)

        return run_loss.avg

    def validata(self):
        """Run validation over ``self.val_loader``.

        Returns:
            Tuple of (mean dice, mean hausdorff distance, mean surface dice).
        """
        self.model.eval()
        dice_meter = AverageMeter()
        hd_meter = AverageMeter()
        nsd_meter = AverageMeter()

        with torch.no_grad():
            for idx, batch_data in enumerate(self.val_loader):
                img, label = batch_data["image"].cuda(), batch_data['label'].cuda()

                with autocast(enabled=self.args.amp):
                    y = self.model(img)

                    # Binarize sigmoid probabilities at 0.5 before computing metrics.
                    y = torch.sigmoid(y)
                    y = torch.where(y > 0.5, 1., 0.)
                    dsc, nsd, hd = self.metrics(y, label)

                # BUG FIX: the original fed nsd into hd_meter and hd into nsd_meter,
                # so the returned hd/nsd averages were swapped.
                dice_meter.update(dsc, n=self.args.batch_size)
                hd_meter.update(hd, n=self.args.batch_size)
                nsd_meter.update(nsd, n=self.args.batch_size)

        return dice_meter.avg, hd_meter.avg, nsd_meter.avg

    def train(self):
        """Full training loop with periodic validation and checkpointing.

        Returns:
            The best mean validation dice observed during training.
        """
        if self.trainer_initialized is False:
            self.initial_trainer()

        # BUG FIX: this was bound only inside the init branch above, producing a
        # NameError whenever the trainer was pre-initialized; and the "new best"
        # test below used '<', which — starting from 0.0 — could never fire for
        # dice (higher is better). Track the running maximum instead.
        val_acc_max = 0.0

        for epoch in range(self.args.start_epoch, self.args.max_epochs):

            print(time.ctime(), "Epoch:", epoch)
            epoch_time = time.time()
            train_loss = self.train_one_epoch()

            print(
                "Train Epoch  {}/{}".format(epoch, self.args.max_epochs - 1),
                "loss: {:.4f}".format(train_loss),
                "time {:.2f}s".format(time.time() - epoch_time),
            )

            self.writer.add_scalar("train_loss", train_loss, epoch)

            if self.scheduler is not None:
                self.scheduler.step()

            if (epoch + 1) % self.args.val_every == 0:

                epoch_time = time.time()
                dice, hd, nsd = self.validata()

                print(
                    "Final validation stats {}/{}".format(epoch, self.args.max_epochs - 1),
                    ", val_dice, hd, nsd:",
                    dice, hd, nsd,
                    ", time {:.2f}s".format(time.time() - epoch_time),
                )

                if self.writer is not None:
                    self.writer.add_scalar("Mean_Val_Dice", np.mean(dice), epoch)

                val_avg_acc = np.mean(dice)

                if val_avg_acc > val_acc_max:
                    print("new best ({:.6f} --> {:.6f}). ".format(val_acc_max, val_avg_acc))
                    val_acc_max = val_avg_acc

                    self.save_checkpoint(file_name="ckpt_best.pt", epoch=epoch, best_acc=val_acc_max)

                self.save_checkpoint(file_name="ckpt_final.pt", epoch=epoch, best_acc=val_acc_max)

        print("Training Finished !, Best Accuracy: ", val_acc_max)
        return val_acc_max

    def evaluate(self):
        """Evaluate on the validation loader.

        The original body was a line-for-line duplicate of ``validata`` (including
        its swapped hd/nsd meters), so delegate to the single fixed implementation.
        Returns (mean dice, mean hd, mean nsd).
        """
        return self.validata()

