import torch
from dice_loss import dice_coeff
from unet3d.model import UNet3D
import dataset 
# Prefer the GPU when one is available; every tensor and the model below are
# moved onto this device before use.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class RunningAverage:
    """Maintains a running (weighted) mean of scalar values.

    Attributes:
        count: total weight accumulated over all updates.
        sum:   weighted sum of every value seen so far.
        avg:   current mean (``sum / count``); 0 before the first update.
    """

    def __init__(self):
        self.count = 0
        self.sum = 0
        self.avg = 0

    def update(self, value, n=1):
        """Fold ``value`` (with weight ``n``) into the statistics."""
        self.sum = self.sum + value * n
        self.count = self.count + n
        self.avg = self.sum / self.count

if __name__ == "__main__":
    print(f'Using device {device}')

    # Grayscale volumes -> in_channels=1; binary segmentation -> out_channels=1.
    net = UNet3D(in_channels=1, out_channels=1, final_sigmoid=True, num_levels=3)
    # testing=True makes the model apply the final sigmoid, so outputs are
    # probabilities in [0, 1] — required for a meaningful Dice coefficient.
    # (With testing=False the last layer would emit raw logits.)
    net.testing = True
    net.to(device=device)
    # faster convolutions, but more memory
    # cudnn.benchmark = True

    # map_location lets a checkpoint saved on GPU load on a CPU-only host too.
    net.load_state_dict(
        torch.load("./checkpoints/unet3d_model_self.pkl", map_location=device)
    )
    # Switch BatchNorm/Dropout (if any) to inference behavior for validation.
    net.eval()

    loaders = dataset.get_train_loaders("self_data_train", "self_data_val")
    val_loader = loaders["val"]

    val_scores = RunningAverage()

    with torch.no_grad():
        for inputs, target in val_loader:
            inputs = inputs.to(device)
            target = target.to(device)
            output = net(inputs)

            # Dice coefficient between prediction and ground truth
            # (a similarity score — higher is better).
            dice_score = dice_coeff(output, target)
            print(dice_score)

            # Skip near-empty targets: Dice is unstable when the ground-truth
            # mask has (almost) no foreground voxels. The 100-voxel threshold
            # looks like a heuristic — TODO confirm against the dataset.
            if target.sum() > 100:
                # .item() detaches the scalar so the running average is a
                # plain float rather than a tensor on the device.
                val_scores.update(dice_score.item(), 1)

    print(f'Validation finished.  Dice Loss: {val_scores.avg}')