import argparse
import logging
import os
import sys

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from tqdm import tqdm

from eval import eval_net
from unet import UNet

from torch.utils.tensorboard import SummaryWriter
from utils.dataset import BasicDataset
from torch.utils.data import DataLoader, random_split

dir_img = '../datasets/images/'
dir_mask = '../datasets/labels/'
dir_checkpoint = 'checkpoints/'


def log(name, _tensor: torch.Tensor):
    """Debug helper: dump a tensor to a text file, one element per line.

    :param name: output file path (overwritten if it exists)
    :param _tensor: tensor to dump; copied to host memory first
    """
    values = _tensor.clone().detach().to('cpu').numpy()
    with open(name, 'w') as f:
        f.writelines(str(v) + '\n' for v in values)


class focal_loss(nn.Module):
    """Multi-class focal loss: -alpha_t * (1 - p_t)**gamma * log(p_t).

    Down-weights easy, well-classified pixels so training focuses on the
    hard ones (Lin et al., "Focal Loss for Dense Object Detection").
    """

    def __init__(self, alpha, gamma=2, num_classes=3, size_average=True):
        """
        :param alpha: per-class weights. A list of length ``num_classes``
            gives fine-grained class weights; a scalar ``a < 1`` expands to
            ``[a, 1-a, 1-a, ...]`` (commonly used to suppress the background
            class in detection; RetinaNet uses 0.25).
        :param gamma: focusing parameter for hard/easy examples (RetinaNet: 2).
        :param num_classes: number of classes C.
        :param size_average: if True return the mean loss over pixels,
            otherwise the sum.
        """
        super(focal_loss, self).__init__()
        self.size_average = size_average
        self.num_classes = num_classes
        if isinstance(alpha, list):
            # Explicit per-class weights, stored as a column vector [C, 1].
            assert len(alpha) == num_classes
            self.alpha = torch.Tensor(alpha).reshape(-1, 1)
        else:
            # Scalar alpha: class 0 gets alpha, every other class 1 - alpha.
            assert alpha < 1
            self.alpha = torch.zeros([num_classes, 1])
            self.alpha[0] += alpha
            self.alpha[1:] += (1 - alpha)
        self.gamma = gamma

    def forward(self, _preds: torch.Tensor, _labels):
        """Compute the focal loss for dense per-pixel predictions.

        :param _preds: raw logits of size [B, C, W, H].
        :param _labels: integer class map with B*W*H entries
            (e.g. [B, W, H]); reshaped to one label per pixel.
        :return: scalar loss (mean over pixels if ``size_average`` else sum).
        """
        B, C, W, H = _preds.size()
        # Move channels last and flatten so each row is one pixel: [B*H*W, C].
        preds = _preds.transpose(1, 3).reshape([-1, C])

        preds_softmax = F.softmax(preds, dim=1)

        labels = _labels.reshape([-1, 1])
        # One-hot encode the targets on the class axis.
        labels_onehot = torch.zeros_like(preds).scatter_(1, labels.long(), 1)

        # p_t: probability of the true class only (zeros elsewhere).
        pt = preds_softmax * labels_onehot
        # Negative log-likelihood of the true class; epsilon guards log(0).
        nll = -torch.log(preds_softmax + 1e-10) * labels_onehot

        # Focal modulation: easy pixels (p_t -> 1) are down-weighted.
        loss = torch.pow((1 - pt), self.gamma) * nll
        self.alpha = self.alpha.to(loss.device)
        # Weight each pixel's loss by its class weight: [N, C] @ [C, 1] -> [N, 1].
        loss = torch.matmul(loss, self.alpha)

        # BUG FIX: size_average was stored but ignored — the mean was always
        # taken, contradicting the documented contract. Honor it now.
        if self.size_average:
            return loss.mean()
        return loss.sum()


def compute_mIOU(pred, label, num_classes=4):
    """Per-class intersection-over-union for a single sample.

    :param pred: class scores of shape [C, H, W]; the predicted class map is
        the argmax over axis 0.
    :param label: ground-truth integer class map of shape [H, W].
    :param num_classes: length of the returned list; classes absent from
        ``label`` keep an IoU of 0.0.
    :return: list of length ``num_classes`` with each class's IoU.
    :raises ValueError: if ``label`` contains fewer than two distinct classes
        (a degenerate sample for this dataset).
    """
    pred = np.argmax(pred, axis=0)
    # BUG FIX: np.int was removed in NumPy 1.20+; use the builtin int.
    classes = np.unique(label).astype(int)
    if len(classes) < 2:
        # BUG FIX: was a bare `raise` with no active exception, which itself
        # raises an opaque RuntimeError. Raise a descriptive error instead.
        raise ValueError(f'label contains fewer than two classes: {classes}')
    ious = [0.0] * num_classes
    for c in list(classes):
        pred_c = pred == c
        label_c = label == c
        intersection = np.logical_and(pred_c, label_c).sum()
        union = np.logical_or(pred_c, label_c).sum()
        # union > 0 is guaranteed: c appears in label, so label_c is non-empty.
        ious[c] = intersection / union
    return ious


def train_net(net,
              device,
              epochs=5,
              batch_size=1,
              lr=0.001,
              val_percent=0.1,
              save_cp=True,
              img_scale=0.5):
    """Train *net* on BasicDataset(dir_img, dir_mask), validating every epoch.

    NOTE(review): this function depends on module-level globals defined in
    the __main__ block: `epoch` (resume start epoch), `loss_function`,
    `optimizer` and `scheduler`. It cannot run standalone without them.
    The `lr` argument is only used for logging and the TensorBoard run
    name — the effective learning rate comes from the global optimizer.
    `save_cp` is accepted but never checked: a checkpoint is written
    every epoch regardless.

    :param net: model to train (already on *device*)
    :param device: torch.device for inputs/targets
    :param epochs: final epoch index; training resumes from global `epoch`
    :param batch_size: DataLoader batch size (squeeze_(0) below suggests 1)
    :param lr: learning rate, logging only (see NOTE above)
    :param val_percent: fraction of the dataset held out for validation
    :param save_cp: unused — checkpoints are always saved
    :param img_scale: image down-scaling factor passed to BasicDataset
    """
    dataset = BasicDataset(dir_img, dir_mask, img_scale)
    # Random train/validation split by sample count.
    n_val = int(len(dataset) * val_percent)
    n_train = len(dataset) - n_val
    train, val = random_split(dataset, [n_train, n_val])
    train_loader = DataLoader(train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
    val_loader = DataLoader(val, batch_size=batch_size, shuffle=False, num_workers=8, pin_memory=True, drop_last=True)

    writer = SummaryWriter(comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
    # global_step is only incremented inside the commented-out eval block below,
    # so every Loss/train point is currently logged at step 0.
    global_step = 0

    logging.info(f'''Starting training:
        Epochs:          {epochs}
        Batch size:      {batch_size}
        Learning rate:   {lr}
        Training size:   {n_train}
        Validation size: {n_val}
        Checkpoints:     {save_cp}
        Device:          {device.type}
        Images scaling:  {img_scale}
    ''')

    # `epoch` is a module-level global set when resuming from a checkpoint.
    for g in range(epoch, epochs):
        record_loss = []
        record_miou = []
        net.train()
        with tqdm(total=n_train, desc=f'Epoch {g + 1}/{epochs}', unit='img') as pbar:
            for batch in train_loader:
                # Presumably 8-bit images — /255 normalizes to [0, 1]. TODO confirm.
                imgs = batch['image'].to(device=device) / 255.0
                true_masks = batch['mask'].to(device=device)

                masks_pred = net(imgs)

                true_masks = true_masks.to(device=device)
                # In-place squeeze of dim 0 — assumes batch_size == 1 so the
                # mask loses its batch dimension. TODO confirm for batch_size > 1.
                true_masks = true_masks.squeeze_(0)
                # `loss_function` is the module-level focal_loss instance.
                loss = loss_function(masks_pred, true_masks)
                record_loss.append(loss.item())
                writer.add_scalar('Loss/train', loss.item(), global_step)
                pbar.set_postfix(**{'train_loss': loss.item()})

                # Track per-batch IoU on the training set (on CPU, via numpy).
                preds_np = masks_pred.detach().cpu().squeeze().numpy()
                labels_np = true_masks.detach().cpu().squeeze().numpy()
                iou = compute_mIOU(preds_np, labels_np)
                record_miou.append(iou)

                optimizer.zero_grad()
                loss.backward()
                # Clip gradient magnitudes to stabilize training.
                nn.utils.clip_grad_value_(net.parameters(), 0.1)
                optimizer.step()

                pbar.update(imgs.shape[0])

                # global_step += 1
                # if global_step % (n_train // (3 * batch_size)) == 0:
                #     val_score = eval_net(net, val_loader, device)
        # Step the (module-level) LR scheduler once per epoch.
        scheduler.step()
        print("lr = ", optimizer.param_groups[0]['lr'])
        print(f'Train Loss: {np.mean(record_loss):.3f}, miou:', np.mean(record_miou, axis=0))

        # Validation pass: same loss/IoU bookkeeping, no gradients.
        record_loss = []
        record_miou = []
        net.eval()
        for batch in val_loader:
            images = batch['image'].to(device) / 255.0
            labels = batch['mask'].to(device)
            with torch.no_grad():
                outputs = net(images)
                labels = labels.squeeze_(0)
                # print(outputs.shape)
                # print(labels.shape)
                loss = loss_function(outputs, labels)
                record_loss.append(loss.item())
                preds_np = outputs.detach().cpu().squeeze().numpy()
                labels_np = labels.detach().cpu().squeeze().numpy()
                iou = compute_mIOU(preds_np, labels_np)
                record_miou.append(iou)
        print(f'Validation Loss: {np.mean(record_loss):.3f}, miou:', np.mean(record_miou, axis=0))

        # Save a full resume checkpoint (model + optimizer + scheduler) each epoch.
        save_path = dir_checkpoint + f'checkpoint_epoch_{g:04d}.pth'
        save_info = {'epoch': g,
                     'model_state_dict': net.state_dict(),
                     'optimizer_state_dict': optimizer.state_dict(),
                     'lr_scheduler': scheduler.state_dict(),
                     'loss': np.mean(record_loss), }
        torch.save(save_info, save_path)

    writer.close()


def get_args():
    """Return the hard-coded training configuration.

    The original argparse CLI was disabled in favor of fixed values; the
    returned namespace exposes the same attribute names the parser would
    have produced (epochs, batchsize, lr, load, scale, val, class_nums).
    """
    return argparse.Namespace(
        epochs=2000,
        batchsize=1,
        lr=0.05,
        load="checkpoints/CP_epoch86.pth",
        scale=0.5,
        val=10,
        class_nums=4,
    )


def find_last_pt(path):
    """Return the path of the last checkpoint file in *path*.

    "Last" is lexicographic, which matches epoch order only because the
    checkpoint filenames zero-pad the epoch number (checkpoint_epoch_0042.pth).

    :param path: checkpoint directory
    :return: full path to the newest checkpoint file
    :raises FileNotFoundError: if the directory contains no entries
        (previously this surfaced as an opaque IndexError)
    """
    checkpoints = sorted(os.listdir(path))
    if not checkpoints:
        raise FileNotFoundError(f'no checkpoint files found in {path!r}')
    # os.path.join also works when *path* lacks a trailing separator,
    # unlike the previous `path + name` concatenation.
    return os.path.join(path, checkpoints[-1])


if __name__ == '__main__':
    # Readable console output for tensors / numpy arrays during debugging.
    torch.set_printoptions(sci_mode=False)
    np.set_printoptions(threshold=sys.maxsize, precision=3)

    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    args = get_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')

    # Change here to adapt to your data
    # n_channels=3 for RGB images
    # n_classes is the number of probabilities you want to get per pixel
    #   - For 1 class and background, use n_classes=1
    #   - For 2 classes, use n_classes=1
    #   - For N > 2 classes, use n_classes=N
    net = UNet(n_channels=3, n_classes=args.class_nums, bilinear=True)
    logging.info(f'Network:\n'
                 f'\t{net.n_channels} input channels\n'
                 f'\t{net.n_classes} output channels (classes)\n'
                 f'\t{"Bilinear" if net.bilinear else "Transposed conv"} upscaling')

    # Per-class focal-loss weights: background (class 0) heavily down-weighted.
    alpha = [0.1, 6, 2, 3]
    gamma = 2
    loss_function = focal_loss(alpha=alpha, gamma=gamma, num_classes=net.n_classes, size_average=True)
    optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=1e-8)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.6)

    # Resume from the newest checkpoint in dir_checkpoint.
    # BUG FIX: map_location remaps tensor storage so a checkpoint saved on a
    # GPU machine also loads on a CPU-only host (and vice versa).
    checkpoint = torch.load(find_last_pt(dir_checkpoint), map_location=device)
    net.load_state_dict(checkpoint['model_state_dict'])
    net = net.to(device=device)
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    scheduler.load_state_dict(checkpoint['lr_scheduler'])
    # Module-level global read by train_net to resume at the right epoch.
    epoch = checkpoint['epoch'] + 1

    train_net(net=net,
              epochs=args.epochs,
              batch_size=args.batchsize,
              lr=args.lr,
              device=device,
              img_scale=args.scale,
              val_percent=args.val / 100)