import random
from os.path import join
from lib.extract_patches import get_data_train
from lib.losses.loss import *
from lib.visualize import group_images, save_img
from lib.common import *
from lib.dataset import TrainDataset
from torch.utils.data import DataLoader
from collections import OrderedDict
from lib.metrics import Evaluate
from lib.visualize import group_images, save_img
from lib.extract_patches import get_data_train
from lib.datasetV2 import data_preprocess, create_patch_idx, TrainDatasetV2, TrainDatasetVal
from tqdm import tqdm
from utils.func import loss_calc, bce_loss
from torch.nn import BCEWithLogitsLoss, MSELoss, CrossEntropyLoss
from utils.util import compute_sdf
from utils import ramps, losses, metrics
import torch
import torch.nn.functional as F
from utils.func import prob_2_entropy,prob_2_entropy2
import torch.optim as optim
from models.discriminator import get_fc_discriminator,get_fc_discriminator2

# ========================get dataloader==============================
def get_dataloader(args):
    """Build train/val DataLoaders by extracting all patches into memory.

    Every training patch is materialized up front, so memory usage is high
    and can overflow on large datasets (see get_dataloaderV2 for the
    index-based, memory-light alternative).

    Returns:
        (train_loader, val_loader) tuple of DataLoaders.
    """
    patches_imgs_train, patches_masks_train = get_data_train(
        data_path_list=args.train_data_path_list,
        patch_height=args.train_patch_height,
        patch_width=args.train_patch_width,
        N_patches=args.N_patches,
        inside_FOV=args.inside_FOV,  # keep only patches inside the FOV (default False)
    )

    # Randomly hold out val_ratio of the patches for validation.
    total = patches_masks_train.shape[0]
    val_ind = random.sample(range(total), int(np.floor(args.val_ratio * total)))
    train_ind = list(set(range(total)) - set(val_ind))

    train_set = TrainDataset(patches_imgs_train[train_ind, ...],
                             patches_masks_train[train_ind, ...],
                             mode="train")
    train_loader = DataLoader(train_set, batch_size=args.batch_size,
                              shuffle=True, num_workers=6)

    val_set = TrainDataset(patches_imgs_train[val_ind, ...],
                           patches_masks_train[val_ind, ...],
                           mode="val")
    val_loader = DataLoader(val_set, batch_size=args.batch_size,
                            shuffle=False, num_workers=6)

    # Save a grid of sample patches so the network input can be inspected.
    if args.sample_visualization:
        N_sample = min(patches_imgs_train.shape[0], 50)
        save_img(group_images((patches_imgs_train[0:N_sample, :, :, :]*255).astype(np.uint8), 10),
                join(args.outf, args.save, "sample_input_imgs.png"))
        save_img(group_images((patches_masks_train[0:N_sample, :, :, :]*255).astype(np.uint8), 10),
                join(args.outf, args.save,"sample_input_masks.png"))
    return train_loader, val_loader


def get_dataloaderV2(args):
    """Build train/val DataLoaders from patch-position indices (memory-light).

    Loads all dataset images into memory once and only builds an index of
    patch extraction positions, so memory usage is much lower than the
    original get_dataloader; measured results show this does not reduce
    training throughput compared with get_dataloader.

    Returns:
        (train_loader, val_loader) tuple of DataLoaders.
    """
    # Labelled volumes (imgs/masks/fovs) plus unlabelled ones (un* prefix).
    imgs_train, masks_train, fovs_train , unimgs_train, unmasks_train, unfovs_train = data_preprocess(data_path_list = args.train_data_path_list)

    patches_idx = create_patch_idx(fovs_train, args)
    unpatches_idx = create_patch_idx(unfovs_train, args)

    # Split patch indices into train/val by ratio (sequential split, no shuffle).
    train_idx,val_idx = np.vsplit(patches_idx, (int(np.floor((1-args.val_ratio)*patches_idx.shape[0])),))
    # NOTE(review): untrain_idx/unval_idx are computed but never used — the
    # full unpatches_idx is passed to TrainDatasetV2 below; confirm intended.
    untrain_idx, unval_idx = np.vsplit(unpatches_idx, (int(np.floor((1 - args.val_ratio) * unpatches_idx.shape[0])),))


    # Training set also carries the unlabelled images for UDA-style training.
    train_set = TrainDatasetV2(imgs_train, masks_train, fovs_train,train_idx,mode="train",args=args, unimgs=unimgs_train, unfovs=unfovs_train, unpatches_idx=unpatches_idx)
    train_loader = DataLoader(train_set, batch_size=args.batch_size,
                              shuffle=True, num_workers=6)

    val_set = TrainDatasetVal(imgs_train, masks_train, fovs_train,val_idx,mode="val",args=args)
    val_loader = DataLoader(val_set, batch_size=args.batch_size,
                            shuffle=False, num_workers=6)

    # Save some samples of feeding to the neural network
    if args.sample_visualization:
        visual_set = TrainDatasetVal(imgs_train, masks_train, fovs_train,val_idx,mode="val",args=args)
        visual_loader = DataLoader(visual_set, batch_size=1,shuffle=True, num_workers=0)
        N_sample = 50
        visual_imgs = np.empty((N_sample,1,args.train_patch_height, args.train_patch_width))
        visual_masks = np.empty((N_sample,1,args.train_patch_height, args.train_patch_width))

        # Collect the first N_sample patches (batch dimension squeezed away).
        # NOTE(review): if the loader yields fewer than N_sample items, the
        # remaining rows keep np.empty garbage — confirm the val set size.
        for i, (img, mask,) in tqdm(enumerate(visual_loader)):
            visual_imgs[i] = np.squeeze(img.numpy(),axis=0)
            visual_masks[i,0] = np.squeeze(mask.numpy(),axis=0)
            if i>=N_sample-1:
                break
        save_img(group_images((visual_imgs[0:N_sample, :, :, :]*255).astype(np.uint8), 10),
                join(args.outf, args.save, "sample_input_imgs.png"))
        save_img(group_images((visual_masks[0:N_sample, :, :, :]*255).astype(np.uint8), 10),
                join(args.outf, args.save,"sample_input_masks.png"))
    return train_loader,val_loader

def get_current_consistency_weight(epoch, rampup_length=40.0):
    """Return the sigmoid consistency ramp-up weight for *epoch*.

    Consistency ramp-up from https://arxiv.org/abs/1610.02242.

    Args:
        epoch: current epoch (or scaled iteration) count.
        rampup_length: number of epochs over which the weight ramps from 0
            to 1. Defaults to 40.0, the previously hard-coded value, so
            existing callers are unaffected.
    """
    return 1.0 * ramps.sigmoid_rampup(epoch, rampup_length)

# =======================train======================== 
def train(train_loader, net, criterion, optimizer, device, iter_num):
    """Run one supervised training epoch on the labelled patches.

    Each batch yields (inputs, targets, uninputs); the unlabelled patches
    are moved to the device but not otherwise used by this plain
    supervised loop (iter_num is likewise unused here, kept for signature
    parity with trainGAN).

    Returns:
        OrderedDict with the epoch's mean training loss under 'train_loss'.
    """
    net.train()
    train_loss = AverageMeter()
    # Removed unused MSELoss instance and unused level-set output binding.

    for batch_idx, (inputs, targets, uninputs) in tqdm(enumerate(train_loader), total=len(train_loader)):

        inputs, targets, uninputs = inputs.to(device), targets.to(device), uninputs.to(device)
        optimizer.zero_grad()

        # Network returns (aux, level-set, main); only the main head is trained.
        _, _, outputs = net(inputs)
        output = torch.sigmoid(outputs)

        loss = criterion(output, targets)
        loss.backward()
        optimizer.step()

        train_loss.update(loss.item(), inputs.size(0))
    log = OrderedDict([('train_loss', train_loss.avg)])
    return log

# =======================train========================
def trainGAN(train_loader, net, criterion, optimizer, device, iter_num):
    """Run one epoch of adversarial (UDA-style) training.

    The segmentation net is trained supervised on labelled source patches
    (segmentation loss + signed-distance-field regression loss) and
    adversarially on unlabelled target patches, while two discriminators
    (one on the auxiliary feature map, one on concatenated entropy maps)
    learn to distinguish source from target.

    NOTE(review): the discriminators and their optimizers are re-created on
    every call, so their weights do NOT persist across epochs — confirm
    this is intentional.

    Returns:
        OrderedDict with the mean of the last-computed (target adversarial)
        loss under 'train_loss'.
    """
    net.train()
    train_loss = AverageMeter()
    mse_loss = MSELoss()

    # Discriminator on the auxiliary prediction map.
    d_aux = get_fc_discriminator(num_classes=1)
    d_aux.train()
    d_aux.to(device)

    # Discriminator on the main seg maps. Currently unused (its optimizer
    # never steps); kept so parameter initialization / RNG state match.
    d_main = get_fc_discriminator2(num_classes=1)
    d_main.train()
    d_main.to(device)

    # Discriminator on the concatenated entropy maps (prediction + level set).
    d_dual = get_fc_discriminator2(num_classes=1)
    d_dual.train()
    d_dual.to(device)

    optimizer_d_aux = optim.Adam(d_aux.parameters(), lr=1e-4,
                                 betas=(0.9, 0.99))
    optimizer_d_main = optim.Adam(d_main.parameters(), lr=1e-4,
                                  betas=(0.9, 0.99))
    optimizer_d_dual = optim.Adam(d_dual.parameters(), lr=1e-4,
                                  betas=(0.9, 0.99))

    # Domain labels for the discriminator BCE targets.
    source_label = 0
    target_label = 1

    for batch_idx, (inputs, targets, uninputs) in tqdm(enumerate(train_loader), total=len(train_loader)):

        inputs, targets, uninputs = inputs.to(device), targets.to(device), uninputs.to(device)
        optimizer.zero_grad()
        optimizer_d_aux.zero_grad()
        optimizer_d_main.zero_grad()
        # BUGFIX: optimizer_d_dual was never zeroed although it steps every
        # batch, so d_dual's gradients accumulated across iterations.
        optimizer_d_dual.zero_grad()

        # ---- (1) Train segmentation net only: freeze all discriminators ----
        for param in d_aux.parameters():
            param.requires_grad = False
        for param in d_main.parameters():
            param.requires_grad = False
        for param in d_dual.parameters():
            param.requires_grad = False

        # Supervised pass on source-domain (labelled) patches.
        pred_src_aux, outputs_src_tanh, pred_src_main = net(inputs)
        output_src = torch.sigmoid(pred_src_main)
        label_batch = targets
        with torch.no_grad():
            # Signed distance field of the ground-truth mask for the SDF loss.
            gt_dis = compute_sdf(label_batch.cpu().numpy(), output_src.shape)
            gt_dis_src = torch.from_numpy(gt_dis).float().cuda()

        loss_sdf = mse_loss(outputs_src_tanh, gt_dis_src)
        loss_seg_src_main = criterion(output_src, targets)

        # Sharp sigmoid converts the signed distance map into a soft mask.
        dis_to_mask_src = torch.sigmoid(-1500 * outputs_src_tanh)
        pred_entropy = prob_2_entropy(pred_src_main)
        # dim=1 matches the implicit softmax dim for 4-D input (deprecation fix).
        dis_entropy = prob_2_entropy2(F.softmax(dis_to_mask_src, dim=1))
        src_entropy = torch.cat((pred_entropy, dis_entropy), 1)

        loss = 1.0 * loss_seg_src_main + 0.3 * loss_sdf
        loss.backward()

        # ---- (2) Adversarial pass on target: fool the frozen discriminators ----
        pred_trg_aux, outputs_trg_tanh, pred_trg_main = net(uninputs)
        d_out_aux = d_aux(pred_trg_aux)
        # Push target predictions towards being classified as "source".
        loss_adv_trg_aux = bce_loss(d_out_aux, source_label)

        dis_to_mask_trg = torch.sigmoid(-1500 * outputs_trg_tanh)
        pred_entropy = prob_2_entropy(pred_trg_main)
        dis_entropy = prob_2_entropy2(F.softmax(dis_to_mask_trg, dim=1))
        trg_entropy = torch.cat((pred_entropy, dis_entropy), 1)
        d_out_dual = d_dual(trg_entropy)
        loss_adv_trg_dual = bce_loss(d_out_dual, source_label)

        loss = 0.01 * loss_adv_trg_aux + 0.01 * loss_adv_trg_dual
        loss.backward()

        # ---- (3) Train the discriminators on detached predictions ----
        for param in d_aux.parameters():
            param.requires_grad = True
        for param in d_main.parameters():
            param.requires_grad = True
        for param in d_dual.parameters():
            param.requires_grad = True

        # Train with source-domain outputs (label: source).
        src_entropy = src_entropy.detach()
        d_out_dual = d_dual(src_entropy)
        loss_d_dual = bce_loss(d_out_dual, source_label)
        loss_d_dual = loss_d_dual / 2
        loss_d_dual.backward()

        pred_src_aux = pred_src_aux.detach()
        d_out_aux = d_aux(pred_src_aux)
        loss_d_aux = bce_loss(d_out_aux, source_label)
        loss_d_aux = loss_d_aux / 2
        loss_d_aux.backward()

        # Train with target-domain outputs (label: target).
        trg_entropy = trg_entropy.detach()
        d_out_dual = d_dual(trg_entropy)
        loss_d_dual = bce_loss(d_out_dual, target_label)
        loss_d_dual = loss_d_dual / 2
        loss_d_dual.backward()

        pred_trg_aux = pred_trg_aux.detach()
        d_out_aux = d_aux(pred_trg_aux)
        loss_d_aux = bce_loss(d_out_aux, target_label)
        loss_d_aux = loss_d_aux / 2
        loss_d_aux.backward()


        optimizer.step()

        optimizer_d_dual.step()
        optimizer_d_aux.step()
        # optimizer_d_main.step()  # d_main is never trained (see above)

        current_losses = {
                          'loss_sdf': loss_sdf,
                          'loss_seg_src_main': loss_seg_src_main,
                           'loss_adv_trg_aux': loss_adv_trg_aux,
                          'loss_d_aux': loss_d_aux,
                          'loss_adv_trg_dual': loss_adv_trg_dual,
                          }
        # Throttled logging keyed off the seg-loss value (~1 in 10 batches).
        if(int(loss_seg_src_main*1000)%10==0):
            print_losses(current_losses,iter_num)

        train_loss.update(loss.item(), inputs.size(0))
    log = OrderedDict([('train_loss',train_loss.avg)])
    return log


# ========================val=============================== 
def val(val_loader,net,criterion,device):
    """Evaluate the network on the validation loader.

    Args:
        val_loader: DataLoader yielding (inputs, targets) batches.
        net: network returning a 3-tuple; only the last (main) output is used.
        criterion: loss applied to the sigmoid of the main output.
        device: torch device the batches are moved to.

    Returns:
        OrderedDict with mean 'val_loss', 'val_acc', 'val_f1', 'val_auc_roc'.
    """
    net.eval()
    val_loss = AverageMeter()
    evaluater = Evaluate()
    # No gradients needed during evaluation.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in tqdm(enumerate(val_loader), total=len(val_loader)):
            inputs, targets = inputs.to(device), targets.to(device)
            _,_,outputs = net(inputs)
            output = torch.sigmoid(outputs)

            #output = output.view(output.size(0), -1).float()
            #target = targets.view(targets.size(0), -1).float()
            loss = criterion(output, targets)
            val_loss.update(loss.item(), inputs.size(0))

            # NOTE(review): the evaluator receives RAW logits of channel 1,
            # not the sigmoid output used for the loss — assumes outputs has
            # >= 2 channels. AUC is rank-invariant to sigmoid, but
            # threshold-based metrics (acc/F1) may differ; confirm the input
            # range Evaluate expects.
            outputs = outputs.data.cpu().numpy()
            targets = targets.data.cpu().numpy()
            evaluater.add_batch(targets,outputs[:,1])
    log = OrderedDict([('val_loss', val_loss.avg), 
                       ('val_acc', evaluater.confusion_matrix()[1]), 
                       ('val_f1', evaluater.f1_score()),
                       ('val_auc_roc', evaluater.auc_roc())])
    return log

def print_losses(current_losses, i_iter):
    """Print the current loss values through tqdm (keeps the bar intact)."""
    rendered = [f'{loss_name} = {to_numpy(loss_value):.3f} '
                for loss_name, loss_value in current_losses.items()]
    tqdm.write(f'iter = {i_iter} ' + ' '.join(rendered))
def to_numpy(tensor):
    """Convert *tensor* to a NumPy array; pass plain ints/floats through."""
    is_plain_number = isinstance(tensor, (int, float))
    return tensor if is_plain_number else tensor.data.cpu().numpy()