## 训练自己的数据
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm
from eval import eval_net
from unet3d.model import UNet2D, UNet3D
import matplotlib.pyplot as plt 
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader, random_split
from dataset import BasicDataset
import dataset 
from dice_loss import dice_coeff
from unet3d.losses import get_loss_criterion
import matplotlib
import h5py
# matplotlib.use("MacOSX")

# Directories containing the training / validation volumes.
train_dir = "./self_data_train/"
val_dir = "./self_data_val/"

# Where model checkpoints are saved/restored.
checkpoint_dir = './checkpoints/'
epochs = 1000
batch_size = 1
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
lr = 0.0001
# Whether the network ends with a sigmoid activation (binary segmentation).
final_sigmoid = True 
# Grayscale input -> 1 input channel; binary mask -> 1 output channel.
in_channels = 1
out_channels = 1

class RunningAverage:
    """Maintains a running (optionally weighted) mean of the values fed to it."""

    def __init__(self):
        # Samples seen so far, their weighted total, and the current mean.
        self.count = 0
        self.sum = 0
        self.avg = 0

    def update(self, value, n=1):
        """Fold in ``value`` counted ``n`` times and refresh the average."""
        self.sum = self.sum + value * n
        self.count = self.count + n
        self.avg = self.sum / self.count

class Trainer():
    """
    unet3d trainer: runs the training loop, periodic validation-dice scoring,
    and best-model checkpointing.

    Relies on the module-level configuration (``device``, ``lr``, ``epochs``,
    ``batch_size``, ``checkpoint_dir``, ``train_dir``, ``val_dir``,
    ``out_channels``) and on the project-local ``dataset`` module for loaders.
    """
    def __init__(self, net):
        """
        Args:
            net: the (already device-placed) network to train.
        """
        self.net = net

        # BCE-with-logits with pos_weight=10 to counter the heavy
        # foreground/background imbalance in the segmentation masks.
        self.loss_criterion = get_loss_criterion(loss_name="BCEWithLogitsLoss", device=device, pos_weight=np.array([10]))
        self.optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=1e-5)
        self.writer = SummaryWriter("./unet3d_log_self_fl/")

    def _split_training_batch(self, t):
        """Move a loader batch to ``device`` and unpack it.

        Returns:
            (input, target, weight) — ``weight`` is None when the batch
            only contains two tensors.
        """
        def _move_to_device(item):
            # Recurse into tuples/lists so nested tensors also land on device.
            if isinstance(item, (tuple, list)):
                return tuple(_move_to_device(x) for x in item)
            return item.to(device)

        t = _move_to_device(t)
        weight = None
        if len(t) == 2:
            input, target = t
        else:
            input, target, weight = t
        return input, target, weight

    def train_net(self):
        """Run the full training loop, checkpointing the best-dice model."""
        # Fix: the original called the module-level global ``net.train()``
        # here instead of ``self.net.train()``.
        self.net.train()
        train_losses = RunningAverage()
        # Resume from an existing checkpoint when one is present.
        checkpoint_path = checkpoint_dir + "unet3d_model_self_fl.pkl"
        if os.path.exists(checkpoint_path):
            self.net.load_state_dict(torch.load(checkpoint_path, map_location=device))
        loader = dataset.get_train_loaders(train_dir, val_dir)
        self.train_loader = loader["train"]
        self.val_loader = loader["val"]

        self.step = 0
        best_dice = 0.0
        for epoch in range(epochs):
            print("epoch is : " + str(epoch))
            for i, t in tqdm(enumerate(self.train_loader), total=len(self.train_loader)):
                self.step += 1
                # Loader yields (input, target) pairs; no per-voxel weights here.
                weight = None
                input, target = t

                input = input.to(device)
                target = target.to(device)
                output = self.net(input)

                # compute the loss
                if weight is None:
                    loss = self.loss_criterion(output, target)
                else:
                    loss = self.loss_criterion(output, target, weight)

                train_losses.update(loss.item(), batch_size)
                # compute gradients and update parameters
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                # Every 3000 steps: log the running train loss, score dice on
                # the validation volumes and checkpoint if it improved.
                if self.step % 3000 == 0:
                    print("epoch is " + str(epoch))
                    # set the model in eval mode while scoring
                    self.net.eval()
                    train_loss_avg = train_losses.avg
                    print("train_loss is : " + str(train_loss_avg))
                    self.writer.add_scalar("train/loss", train_loss_avg, self.step)

                    # Average the patch-stitched dice over every validation volume.
                    n = 0
                    dice = 0.0
                    test_loader_label = dataset.get_test_loaders(val_dir)
                    for test_loader, test_label in test_loader_label:
                        n += 1
                        test_label = torch.from_numpy(test_label).to(device)
                        dice += self.predict(test_loader, test_label)

                    dice_value = dice / n
                    self.writer.add_scalar("dice/loss", dice_value, self.step)
                    print("test dice is " + str(dice_value))

                    # back to training mode
                    self.net.train()

                    os.makedirs(checkpoint_dir, exist_ok=True)
                    if dice_value > best_dice:
                        best_dice = dice_value
                        torch.save(self.net.state_dict(), checkpoint_path)
                        print("model is saved ~")

    def _accumulate_predictions(self, test_loader):
        """Stitch per-patch sigmoid outputs into one full (C, D, H, W) volume.

        Overlapping patches are summed and divided by the per-voxel coverage
        count, producing an averaged probability map.  Shared by ``predict``
        and ``predict_sample`` (which previously duplicated this logic).
        """
        volume_shape = self._volume_shape(test_loader.dataset)

        prediction_maps_shape = (out_channels,) + volume_shape

        print(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')

        print('Allocating prediction and normalization arrays...')
        # Output probability accumulator and the per-voxel patch-coverage count.
        prediction_maps = torch.zeros(prediction_maps_shape, dtype=torch.float32).to(device)
        normalization_map = torch.zeros(prediction_maps_shape, dtype=torch.float32).to(device)
        print(prediction_maps.shape)
        with torch.no_grad():
            for batch, indices in test_loader:
                # batch: (N, channels, D, W, H); indices: per-sample slice tuples
                batch = batch.to(device)
                # forward pass -> (N, out_channels, D, W, H) probabilities
                predictions = torch.sigmoid(self.net(batch))

                for pred, index in zip(predictions, indices):
                    # index holds the (depth, height, width) slices of this patch;
                    # prepend the channel slice to address the (C,D,H,W) map.
                    channel_slice = slice(0, out_channels)
                    index = (channel_slice,) + index
                    prediction_maps[index] += pred
                    normalization_map[index] += 1
            # Average overlapping patch contributions.
            return prediction_maps / normalization_map

    def predict(self, test_loader, test_label):
        """Return the dice coefficient of the stitched prediction vs. ``test_label``."""
        prediction_maps = self._accumulate_predictions(test_loader)
        print("pred sum is " + str((prediction_maps > 0.5).sum()))
        print("label sum is " + str(test_label.sum()))
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~`")
        dice_v = dice_coeff(prediction_maps, test_label)
        return dice_v

    def save_sample(self):
        """Write the thresholded prediction of each validation volume to HDF5."""
        test_loader_label = dataset.get_test_loaders(val_dir)
        for test_loader, test_label in test_loader_label:
            test_label = torch.from_numpy(test_label).to(device)
            out_path = "./output_dir/out_" + test_loader.dataset.file_path.split("/")[-1]
            predictions = self.predict_sample(test_loader, test_label)
            # Context manager guarantees the file is closed even on error.
            with h5py.File(out_path, "w") as h5_file:
                h5_file.create_dataset("label", data=predictions, compression="gzip")

    def predict_sample(self, test_loader, test_label):
        """Return the stitched prediction, thresholded at 0.5, as a numpy array."""
        prediction_maps = self._accumulate_predictions(test_loader)
        prediction_maps = (prediction_maps > 0.5).long()
        return prediction_maps.cpu().numpy()

    def _volume_shape(self, ds):
        """Return the (D, H, W) spatial shape of a test dataset's raw volume."""
        # TODO: support multiple internal datasets
        raw = ds.raws
        print(str(raw.shape) + "raw_shape")
        # 3-D raw is a single-channel volume; otherwise drop the channel axis.
        if raw.ndim == 3:
            return raw.shape
        return raw.shape[1:]

    def validate(self, val_loader):
        """Compute average loss (and dice on non-trivial targets) over ``val_loader``.

        Returns:
            (mean_loss, mean_dice)
        """
        print('Validating...')

        val_losses = RunningAverage()
        val_scores = RunningAverage()

        with torch.no_grad():
            # Fix: the original ignored the ``val_loader`` parameter and
            # iterated ``self.val_loader`` instead.
            for i, t in tqdm(enumerate(val_loader), total=len(val_loader)):
                input, target, weight = self._split_training_batch(t)
                output = self.net(input)
                # compute the loss
                if weight is None:
                    loss = self.loss_criterion(output, target)
                else:
                    loss = self.loss_criterion(output, target, weight)

                val_losses.update(loss.item(), batch_size)

                # if model contains final_activation layer for normalizing logits apply it, otherwise
                # the evaluation metric will be incorrectly computed
                if hasattr(self.net, 'final_activation') and self.net.final_activation is not None:
                    output = self.net.final_activation(output)
                    # Skip dice on (near-)empty targets: the score is meaningless there.
                    if target.sum() > 10:
                        dice_loss_v = dice_coeff(output, target)
                        print("out sum : " + str((output > 0.5).sum()))
                        print("target sum : " + str(target.sum()))
                        print(dice_loss_v)
                        val_scores.update(dice_loss_v, batch_size)

            print(f'Validation finished. Loss: {val_losses.avg}. Dice Loss: {val_scores.avg}')
            return val_losses.avg, val_scores.avg


if __name__ == '__main__':
   
    print(f'Using device {device}')

    # Grayscale input -> in_channels = 1; binary segmentation -> out_channels = 1.
    # Per the original author: during training the final sigmoid is not applied
    # inside the net (loss is BCEWithLogitsLoss on raw logits).
    net = UNet3D(in_channels=in_channels, out_channels=out_channels, final_sigmoid=final_sigmoid, num_levels=3)
    net.to(device=device)
    # NOTE(review): train_net() switches the model back to train mode
    # immediately, so this eval() call looks redundant — confirm intent.
    net.eval()

    trainer = Trainer(net)
    trainer.train_net()
    # trainer.save_sample()
    