### Attempt to train on my own data with a 2D network
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm
from eval import eval_net
from unet3d.model import UNet2D, UNet3D
import matplotlib.pyplot as plt 
from scipy.ndimage import rotate, map_coordinates, gaussian_filter
from torchvision.transforms import Compose

import matplotlib 
# print(matplotlib.get_backend())
# matplotlib.use("MacOSX")
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader, random_split, Dataset
from dataset import BasicDataset
import h5py
# Shared random source; each Transformer draws its per-run seed from it so
# paired raw/label augmentations stay in sync.
GLOBAL_RANDOM_STATE = np.random.RandomState(47)

# HDF5 files with the pre-sliced 2D data (datasets "raw" and "label").
train_file = "./self_data_2d_train.h5"
val_file = "./self_data_2d_val.h5"
dir_checkpoint = 'checkpoints/'
# Training hyper-parameters.
epochs = 100
batch_size = 1
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
lr = 0.0001
img_size = (256, 256)  # only logged below; presumably the stored slice size — TODO confirm
final_sigmoid = True
in_channels = 1   # grayscale input
out_channels = 1  # binary segmentation -> single output channel

class RandomFlip:
    """
    Randomly flip the image across each of the three spatial axes.

    Accepts 3D (DxHxW) or 4D (CxDxHxW) arrays.  The RandomState is rebuilt
    from the stored seed on every call, so two instances created with the
    same seed (e.g. one for raw images, one for labels) always make the
    exact same flip decisions — otherwise the models won't converge.
    """

    def __init__(self, seed, axis_prob=0.5, **kwargs):
        # Seed is kept so __call__ can recreate an identical RandomState.
        self.seed = seed
        self.axes = (0, 1, 2)
        self.axis_prob = axis_prob

    def __call__(self, m):
        assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'
        self.random_state = np.random.RandomState(self.seed)
        for ax in self.axes:
            # NOTE: flips when the draw EXCEEDS axis_prob, i.e. with
            # probability (1 - axis_prob); one draw happens per axis.
            if self.random_state.uniform() <= self.axis_prob:
                continue
            if m.ndim == 4:
                # Flip every channel independently, then restack.
                m = np.stack([np.flip(m[c], ax) for c in range(m.shape[0])], axis=0)
            else:
                m = np.flip(m, ax)

        return m


class RandomRotate90:
    """
    Rotate the array by k * 90 degrees (k drawn uniformly from 0..3) in the
    (H, W) plane.  Accepts 3D (DxHxW) or 4D (CxDxHxW) arrays.

    The RandomState is rebuilt from the stored seed on every call, so raw
    and label transformers sharing a seed pick the same k — otherwise the
    models won't converge.

    IMPORTANT: assumes DHW axis order (rotation is across axes (1, 2)).
    """

    def __init__(self, seed, **kwargs):
        self.seed = seed
        # Always rotate around the z-axis, i.e. within the (H, W) plane.
        self.axis = (1, 2)

    def __call__(self, m):
        assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'
        self.random_state = np.random.RandomState(self.seed)
        # Number of quarter turns to apply.
        k = self.random_state.randint(0, 4)
        if m.ndim == 4:
            # Rotate each channel separately and restack along channel axis.
            rotated = [np.rot90(channel, k, axes=self.axis) for channel in m]
            return np.stack(rotated, axis=0)
        return np.rot90(m, k, axes=self.axis)


class RandomRotate:
    """
    Rotate an array by a random angle drawn from
    [-angle_spectrum, angle_spectrum).

    Uses scipy.ndimage.rotate with reshape=False, so output shape equals
    input shape.  Rotation happens in scipy's default plane (axes (1, 0));
    the ``axes`` argument is stored but not used by __call__.
    """

    def __init__(self, seed, angle_spectrum=30, axes=None, mode='reflect', order=0, **kwargs):
        self.seed = seed
        self.angle_spectrum = angle_spectrum
        self.axes = axes  # kept for interface compatibility; unused below
        self.mode = mode
        self.order = order

    def __call__(self, m):
        # Re-seed per call so paired raw/label transforms pick the same angle.
        self.random_state = np.random.RandomState(self.seed)
        angle = self.random_state.randint(-self.angle_spectrum, self.angle_spectrum)

        if m.ndim != 3:
            # Multi-channel input: rotate each channel independently.
            rotated = [rotate(channel, angle, reshape=False, order=self.order,
                              mode=self.mode, cval=-1) for channel in m]
            return np.stack(rotated, axis=0)
        return rotate(m, angle, reshape=False, order=self.order,
                      mode=self.mode, cval=-1)

class Transformer:
    """
    Factory for the augmentation pipeline (flip, rot90, free rotation).

    All three sub-transforms share one seed drawn from the module-level
    GLOBAL_RANDOM_STATE, so applying the same pipeline to a raw image and
    to its label yields identical spatial transforms.
    """

    def __init__(self):
        # One shared seed keeps every sub-transform deterministic per run.
        self.seed = GLOBAL_RANDOM_STATE.randint(10000000)

    def create_transform(self):
        """Return a torchvision Compose chaining the three random transforms."""
        return Compose([
            RandomFlip(self.seed),
            RandomRotate90(self.seed),
            RandomRotate(self.seed),
        ])

class Self2DDataset(Dataset):
    def __init__(self, file_path, phase="train"):
        super(Self2DDataset, self).__init__()
        self.phase = phase
        h5_file = h5py.File(file_path, "r")
        
        self.imgs = h5_file["raw"][()]
        self.labels = h5_file["label"][()]
    
    def __getitem__(self, idx):
        img = self.imgs[idx]
        label = self.labels[idx]
        mean_v = img.mean()
        std_v = img.std()
        img = (img - mean_v) / std_v
        # plt.imshow(img, cmap="gray")
        # plt.show()
        # plt.imshow(label, cmap="gray")
        # plt.show()
        if len(img.shape) == 2:
            img = np.expand_dims(img, axis=0)
           
        if len(label.shape) == 2:
            label = np.expand_dims(label, axis=0)
        
        # if self.phase == "train":
        #     self.transformer = Transformer().create_transform()
        #     img = self.transformer(img)
        #     label = self.transformer(label)
            # img = img.squeeze().squeeze()
            # plt.imshow(img, cmap="gray")
            # plt.show()
            # label = label.squeeze()
            # plt.imshow(label, cmap="gray")
            # plt.show()

        return torch.tensor(img.copy()), torch.tensor(label.copy())

    def __len__(self):
        return len(self.imgs)

def train_net(discriminator, generator):
    """
    Adversarial training loop.

    The discriminator learns to tell real (img, label) pairs from
    (img, generated-mask) pairs; the generator (a UNet) learns to produce
    masks the discriminator accepts.  The generator is checkpointed
    whenever its validation Dice (from eval_net) improves.

    Args:
        discriminator: model taking (img, mask) and returning real/fake logits.
        generator: segmentation model taking img and returning mask logits.
    """
    # Split into training and validation sets (pre-built HDF5 files).
    train_ds = Self2DDataset(train_file, phase="train")
    val_ds = Self2DDataset(val_file, phase="val")

    train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_ds, batch_size=batch_size, shuffle=False)

    writer = SummaryWriter("./self_data_2d_log/", comment=f'LR_{lr}_BS_{batch_size}')
    global_step = 0

    logging.info(f'''Starting training:
        Epochs:          {epochs}
        Batch size:      {batch_size}
        Learning rate:   {lr}
        Training size:   {len(train_ds)}
        Validation size: {len(val_ds)}
        Device:          {device.type}
        Images size:  {img_size}
    ''')

    optimizer_dis = optim.Adam(discriminator.parameters(), lr=lr, weight_decay=1e-5)
    optimizer_gene = optim.Adam(generator.parameters(), lr=lr, weight_decay=1e-5)

    # BUGFIX: original read `nn.BCEWithLogitsLoss(device=device))` — a syntax
    # error, and BCEWithLogitsLoss takes no `device` argument anyway.
    criterion = nn.BCEWithLogitsLoss()

    # Validate roughly twice per epoch; guard against tiny datasets where the
    # original `len(train_ds) // (2 * batch_size)` would be 0 (ZeroDivisionError).
    val_interval = max(1, len(train_ds) // (2 * batch_size))

    best_dice = 0
    for epoch in range(epochs):
        # BUGFIX: original called `net.train()` on an undefined name.
        discriminator.train()
        generator.train()
        epoch_loss = 0
        with tqdm(total=len(train_ds), desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
            for img, label in train_loader:
                img = img.to(device=device, dtype=torch.float32)
                mask_type = torch.float32 if out_channels == 1 else torch.long
                label = label.to(device=device, dtype=mask_type)

                # ---- discriminator step ----
                # Real pair: discriminator should answer "real" (all ones).
                dis_true_out = discriminator(img, label)
                true_label = torch.ones_like(dis_true_out, device=device)
                true_loss = criterion(dis_true_out, true_label)

                # Fake pair: generator output is detached so this backward
                # pass updates only the discriminator.
                fake_mask = generator(img).detach()
                dis_fake_out = discriminator(img, fake_mask)
                fake_label = torch.zeros_like(dis_fake_out, device=device)
                fake_loss = criterion(dis_fake_out, fake_label)

                dis_loss = true_loss + fake_loss
                epoch_loss += dis_loss.item()
                writer.add_scalar('Loss/train', dis_loss.item(), global_step)
                pbar.set_postfix(**{'loss (batch)': dis_loss.item()})

                optimizer_dis.zero_grad()
                dis_loss.backward()
                optimizer_dis.step()

                # ---- generator step ----
                # Regenerate the mask (not detached this time) and push the
                # discriminator's verdict towards "real".
                fake_mask = generator(img)
                dis_fake_out = discriminator(img, fake_mask)
                true_label = torch.ones_like(dis_fake_out, device=device)
                gene_loss = criterion(dis_fake_out, true_label)
                optimizer_gene.zero_grad()
                gene_loss.backward()
                optimizer_gene.step()

                pbar.update(img.shape[0])
                global_step += 1

                if global_step % val_interval == 0:
                    # val_score is the mean dice score over the validation set.
                    val_score = eval_net(generator, val_loader, device)
                    print("dice loss is : " + str(val_score))

                    if best_dice < val_score:
                        best_dice = val_score
                        torch.save(generator.state_dict(),
                                    dir_checkpoint + f'unet2D_self_data.pkl')
                        print(f'Checkpoint {epoch + 1} saved !')

                    logging.info('Validation Dice Coeff: {}'.format(val_score))
                    writer.add_scalar('Dice/test', val_score, global_step)

                    # BUGFIX: original logged an undefined `masks_pred` and
                    # applied a depth-axis squeeze(2) left over from the 3D
                    # version; with 2D BxCxHxW tensors we log the last
                    # generator output directly.
                    writer.add_images('images', img, global_step)
                    if out_channels == 1:
                        writer.add_images('masks/true', label, global_step)
                        writer.add_images('masks/pred',
                                          torch.sigmoid(fake_mask.detach()) > 0.5,
                                          global_step)

        print("epoch loss is : " + str(epoch_loss))

    writer.close()


if __name__ == '__main__':

    logging.info(f'Using device {device}')
    # Input is a grayscale image, so in_channels is 1; binary segmentation,
    # so out_channels is also 1.
    net = UNet2D(in_channels=in_channels, out_channels=out_channels, final_sigmoid=final_sigmoid) # During training (testing=False) the last layer is not activated with a sigmoid.

    net.to(device=device)
    # faster convolutions, but more memory
    # cudnn.benchmark = True

    # NOTE(review): this call is broken — train_net is declared as
    # train_net(discriminator, generator), so there is no `net` keyword and
    # this raises TypeError.  No discriminator model is built anywhere in
    # this file; one must be constructed and passed as
    # train_net(discriminator=..., generator=net).  TODO: fix before running.
    train_net(net=net)
    
