import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm
from eval import eval_net
from unet3d.model import UNet2D, UNet3D
import matplotlib.pyplot as plt 
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader, random_split
from dataset import BasicDataset
import dataset 
from dice_loss import dice_coeff
from unet3d.losses import get_loss_criterion

# Directories holding the training / validation volumes.
train_dir = "./cell_seg_3d_train/"
val_dir = "./cell_seg_3d_val/"

checkpoint_dir = './checkpoints/'  # where the best model weights are saved
epochs = 100
batch_size = 1
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
lr = 0.0001
# validation-set percentage (NOTE(review): leftover comment — the constant it
# described appears to have been removed)
final_sigmoid = True  # forwarded to the UNet3D constructor
in_channels = 1   # grayscale input volumes (see comment in __main__)
out_channels = 1  # binary segmentation -> single output channel

class RunningAverage:
    """Maintains a running (optionally weighted) mean of scalar values."""

    def __init__(self):
        # Total number of observations, their weighted sum, and the mean so far.
        self.count = 0
        self.sum = 0
        self.avg = 0

    def update(self, value, n=1):
        """Fold in `value` observed `n` times and recompute the mean."""
        self.count, self.sum = self.count + n, self.sum + value * n
        self.avg = self.sum / self.count

class Trainer():
    """UNet3D training driver.

    Wraps the training loop, periodic validation, TensorBoard logging,
    and best-checkpoint saving for a given network.
    """

    def __init__(self, net):
        self.net = net

        # BCE-with-logits: the network emits raw logits during training
        # (final sigmoid is deferred — see __main__).
        self.loss_criterion = get_loss_criterion(loss_name="BCEWithLogitsLoss", device=device)
        self.optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=1e-5)
        self.writer = SummaryWriter("./unet3d_log/")

    def _split_training_batch(self, t):
        """Move a batch to `device` and unpack it as (input, target, weight).

        `weight` is None when the loader yields 2-tuples.
        """
        def _move_to_device(input):
            # Recursively handle nested tuples/lists of tensors.
            if isinstance(input, (tuple, list)):
                return tuple(_move_to_device(x) for x in input)
            return input.to(device)

        t = _move_to_device(t)
        weight = None
        if len(t) == 2:
            input, target = t
        else:
            input, target, weight = t
        return input, target, weight

    def train_net(self):
        """Run the full training loop with periodic validation and checkpointing."""
        # BUG FIX: train the wrapped network — the original called the
        # module-level global `net` instead of `self.net`.
        self.net.train()
        train_losses = RunningAverage()

        # Resume from a previously saved checkpoint when one exists.
        model_path = os.path.join(checkpoint_dir, "unet3d_model.pkl")
        if os.path.exists(model_path):
            self.net.load_state_dict(torch.load(model_path, map_location=device))

        self.step = 0
        best_dice = 0
        for epoch in range(epochs):
            # Re-create the loaders each epoch so only the current chunk of
            # training data is held in memory (avoids exhausting RAM).
            loader = dataset.get_train_loaders(train_dir, val_dir)
            self.train_loader = loader["train"]
            self.val_loader = loader["val"]
            for i, t in tqdm(enumerate(self.train_loader), total=len(self.train_loader)):
                self.step += 1

                # The loaders yield (input, target) pairs; no per-voxel weights.
                weight = None
                input, target = t
                input = input.to(device)
                target = target.to(device)
                output = self.net(input)

                # Compute the loss (weighted variant kept for future loaders).
                if weight is None:
                    loss = self.loss_criterion(output, target)
                else:
                    loss = self.loss_criterion(output, target, weight)

                train_losses.update(loss.item(), batch_size)

                # Backprop and parameter update.
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                if self.step % 1000 == 0:
                    # Periodically evaluate on the validation set in eval mode.
                    self.net.eval()
                    val_loss, dice_value = self.validate(self.val_loader)
                    self.writer.add_scalar("train/loss", train_losses.avg, self.step // 1000)
                    self.writer.add_scalar("val/loss", val_loss, self.step // 1000)
                    self.writer.add_scalar("dice/loss", dice_value, self.step // 1000)
                    # Back to training mode for the next iterations.
                    self.net.train()

                    os.makedirs(checkpoint_dir, exist_ok=True)
                    # BUG FIX: remember the best score — the original never
                    # updated `best_dice`, so any dice > 0 overwrote the
                    # "best" checkpoint even after a regression.
                    if dice_value > best_dice:
                        best_dice = dice_value
                        torch.save(self.net.state_dict(), model_path)

    def validate(self, val_loader):
        """Evaluate the network on `val_loader`.

        Returns:
            (mean validation loss, mean dice score) over the loader.
        """
        print('Validating...')

        val_losses = RunningAverage()
        val_scores = RunningAverage()

        with torch.no_grad():
            # BUG FIX: iterate the loader passed in — the original ignored
            # its argument and always read `self.val_loader`.
            for i, t in tqdm(enumerate(val_loader), total=len(val_loader)):
                input, target, weight = self._split_training_batch(t)
                output = self.net(input)
                if weight is None:
                    loss = self.loss_criterion(output, target)
                else:
                    loss = self.loss_criterion(output, target, weight)

                val_losses.update(loss.item(), batch_size)

                # If the model carries a final_activation layer (sigmoid),
                # apply it before computing dice — otherwise the metric is
                # computed on raw logits and is incorrect.
                if hasattr(self.net, 'final_activation') and self.net.final_activation is not None:
                    output = self.net.final_activation(output)
                    dice_loss_v = dice_coeff(output, target)
                    val_scores.update(dice_loss_v, batch_size)

            print(f'Validation finished. Loss: {val_losses.avg}. Dice Loss: {val_scores.avg}')
            return val_losses.avg, val_scores.avg


if __name__ == '__main__':
   
    print(f'Using device {device}')

    ## The input is a grayscale volume, so in_channels is 1; the task is
    ## binary segmentation, so out_channels is also 1.
    net = UNet3D(in_channels=in_channels, out_channels=out_channels, final_sigmoid=final_sigmoid) # During training (testing=False) the final layer is not passed through a sigmoid.
   
    net.to(device=device)
    # faster convolutions, but more memory
    # cudnn.benchmark = True

    # try:
    trainer = Trainer(net)
    trainer.train_net()
    # except KeyboardInterrupt:
    #     torch.save(net.state_dict(), 'INTERRUPTED.pth')
    #     logging.info('Saved interrupt')
    #     try:
    #         sys.exit(0)
    #     except SystemExit:
    #         os._exit(0)
