import math
import numpy as np
import cv2
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.nn.functional as F
import os
import sys
import torch
import torch.nn as nn

from models.mul_loss import pytorch_ssim, pytorch_iou

# print(sys.path)
# print(os.getcwd())
from datasets.datasets.table.dataloader import LoadTableImageAndLabels
from models.assembly.mul_loss_model import PanModel


def save_tensor(tensor, i, e):
    """Dump channel 0 of the first image of a (N, C, H, W) tensor batch to results/<e>-<i>.jpg."""
    img = tensor[0].detach().cpu().numpy().transpose(1, 2, 0)
    # Scale [0, 1] floats to uint8 and keep only the first channel.
    img = np.array(img * 255, np.uint8)[:, :, 0]
    cv2.imwrite('results/' + str(e) + '-' + str(i) + '.jpg', img)

class BCELoss2d(nn.Module):
    """Binary cross-entropy over flattened 2-D probability maps.

    `logits` must already be probabilities in (0, 1) — no sigmoid is applied
    (the original sigmoid call was disabled).
    """

    def __init__(self, weight=None, size_average=True):
        super(BCELoss2d, self).__init__()
        # `size_average` is deprecated in modern torch; translate it to the
        # equivalent `reduction` argument (True -> 'mean', False -> 'sum').
        self.bce_loss = nn.BCELoss(weight=weight,
                                   reduction='mean' if size_average else 'sum')

    def forward(self, logits, targets):
        probs_flat = logits.view(-1)
        targets_flat = targets.view(-1)
        return self.bce_loss(probs_flat, targets_flat)


class BCEFocalLoss(torch.nn.Module):
    """Focal variant of binary cross-entropy computed on probabilities.

    `_input` must already be probabilities in (0, 1); no sigmoid is applied
    here (the sigmoid call was disabled in the original).
    """

    def __init__(self, gamma=2, alpha=None, reduction='elementwise_mean'):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction

    def forward(self, _input, target):
        prob = _input
        # Focal terms: down-weight easy examples by (1 - p)^gamma / p^gamma.
        pos_term = (1 - prob) ** self.gamma * target * torch.log(prob)
        neg_term = prob ** self.gamma * (1 - target) * torch.log(1 - prob)
        loss = -(pos_term + neg_term)
        if self.alpha:
            loss = loss * self.alpha
        if self.reduction == 'elementwise_mean':
            return torch.mean(loss)
        if self.reduction == 'sum':
            return torch.sum(loss)
        return loss


def balance_mask(score, label, mask):
    """OHEM-style selection mask: keep all positives plus the hardest negatives (~1:3 ratio).

    Args:
        score: predicted probability map.
        label: ground-truth map; > 0.5 counts as positive.
        mask:  unused; kept for interface compatibility with callers.

    Returns:
        (selected_mask, num_selected) — fix: the original returned a bare
        tensor on the no-positive / no-negative branches, which crashed
        callers (e.g. hard_mining) that unpack two values.
    """
    pos_num = label[label > 0.5].numel()
    if pos_num == 0:
        # No positives: select everything so the loss stays well-defined.
        selected_mask = torch.ones_like(label)
        return selected_mask, selected_mask.numel()
    selected_mask = torch.zeros_like(label)
    selected_mask[label > 0.5] = 1.0
    # Negative:positive ratio capped at 3:1.
    neg_num = label[label <= 0.5].numel()
    neg_num = int(min(pos_num * 3, neg_num))
    if neg_num == 0:
        return selected_mask, pos_num
    # Hardest negatives = negatives with the highest scores; sort descending
    # via negation and take the neg_num-th score as threshold.
    neg_score = score[label <= 0.5]
    neg_score_sorted = torch.sort(-neg_score)
    threshold = -neg_score_sorted[0][neg_num - 1]
    selected_mask[score >= threshold] = 1.0
    return selected_mask, pos_num + neg_num
    # return selected_mask, total_num


def hard_mining(outputs, targets, mask):
    """BCE with online hard example mining.

    Blends 70% of the balanced-mask-weighted BCE (scaled x2, normalised by the
    number of selected pixels) with 30% of the plain mean BCE.
    """
    selected, n_selected = balance_mask(outputs, targets, mask)
    per_pixel = F.binary_cross_entropy(outputs, targets, reduction='none')
    balanced_term = ((per_pixel * 2).mul(selected).sum() / n_selected).mean()
    return (0.7 * balanced_term + 0.3 * per_pixel.mean()).mean()


def hard_mining1(outputs, targets):
    """Top-k hard example mining over cross-entropy: mean of the hardest 10%.

    Fixes:
    - The original called `nn.CrossEntropyLoss(outputs, targets)`, which
      *constructs* a loss module (outputs misread as class weights) instead of
      computing a loss; use the functional form with per-element reduction.
    - `numel() // 10` can be 0 for small inputs, making `topk(0)` return an
      empty tensor whose mean is NaN; keep at least one element.
    """
    loss = F.cross_entropy(outputs, targets, reduction='none')
    flat = loss.reshape(-1)
    k = max(1, flat.numel() // 10)
    _, topk_loss_inds = flat.topk(k)
    return flat[topk_loss_inds].mean()


class SoftDiceLoss(torch.nn.Module):
    """1 - soft Dice coefficient, with predictions hard-thresholded at 0.5.

    Fix: the original also ran `logits[logits < 0.5] = 0` *in place*, silently
    mutating the caller's tensor (and risking autograd in-place errors). The
    clone-based threshold below (`m3 <= 0.5`) already subsumes that write, so
    removing it leaves the returned value unchanged.
    """

    def __init__(self, weight=None, size_average=True):
        # weight / size_average accepted for interface compatibility; unused.
        super(SoftDiceLoss, self).__init__()

    def forward(self, logits, targets):
        num = targets.size(0)
        smooth = 1
        m1 = logits.view(num, -1)
        m2 = targets.view(num, -1)
        # Threshold a copy so the input tensor is left untouched.
        m3 = m1.clone()
        m3[m3 <= 0.5] = 0.0
        intersection = (m3 * m2)
        score = 2. * (intersection.sum(1) + smooth) / (m3.sum(1) + m2.sum(1) + smooth)
        score = 1 - score.sum() / num
        return score


class TverskyLoss(torch.nn.Module):
    """Mean Tversky index over the batch (alpha = 0.7).

    NOTE(review): this returns the index itself, not `1 - index`; as a *loss*
    to minimise that looks inverted — confirm intent before wiring it into a
    training loop. Reference: https://zhuanlan.zhihu.com/p/103426335
    """

    def __init__(self, weight=None, size_average=True):
        # weight / size_average accepted for interface compatibility; unused.
        super(TverskyLoss, self).__init__()

    def forward(self, logits, targets):
        batch = targets.size(0)
        smooth = 1

        pred = logits.view(batch, -1)
        truth = targets.view(batch, -1)
        tp = (truth * pred).sum(1)
        fn = (truth * (1 - pred)).sum(1)
        fp = ((1 - truth) * pred).sum(1)

        alpha = 0.7
        index = (tp + smooth) / (tp + alpha * fn + (1 - alpha) * fp + smooth)
        return index.sum() / batch

# Shared loss instances consumed by bce_ssim_loss below.
bce_loss = nn.BCELoss(reduction='mean')  # fix: size_average=True is deprecated; 'mean' is equivalent
ssim_loss = pytorch_ssim.SSIM(window_size=11, size_average=True)
iou_loss = pytorch_iou.IOU(size_average=True)
class SoftDiceLoss(torch.nn.Module):
    """1 - soft Dice coefficient with a hard 0.5 threshold on predictions.

    NOTE(review): this re-declares a SoftDiceLoss class defined earlier in
    this file (shadowing it); the duplicate should be removed in a follow-up.
    Fix applied here: dropped the in-place `logits[logits < 0.5] = 0`, which
    mutated the caller's tensor; thresholding the clone (`m3 <= 0.5`) already
    covers it, so the returned value is unchanged.
    """

    def __init__(self, weight=None, size_average=True):
        # weight / size_average accepted for interface compatibility; unused.
        super(SoftDiceLoss, self).__init__()

    def forward(self, logits, targets):
        num = targets.size(0)
        smooth = 1
        m1 = logits.view(num, -1)
        m2 = targets.view(num, -1)
        # Threshold a copy so the input tensor is left untouched.
        m3 = m1.clone()
        m3[m3 <= 0.5] = 0.0
        intersection = (m3 * m2)
        score = 2. * (intersection.sum(1) + smooth) / (m3.sum(1) + m2.sum(1) + smooth)
        score = 1 - score.sum() / num
        return score
loss_softdice = SoftDiceLoss()
def bce_ssim_loss(pred, target):
    """Sum of BCE, soft-Dice and IoU losses between `pred` and `target`.

    Despite the name, the SSIM term was swapped for soft Dice (the variable
    name `ssim_out` was kept in the original; renamed to `dice_out` here).
    """
    bce_out = bce_loss(pred, target)
    dice_out = loss_softdice(pred, target)
    iou_out = iou_loss(pred, target)
    return bce_out + dice_out + iou_out

def muti_bce_loss_fusion(c2, c3, c4, c5, fy, labels_v, loss_softdice):
    """Deep-supervision loss: bce_ssim_loss on each side output plus the fused map.

    `loss_softdice` is accepted for interface compatibility but is not used
    here (the soft-Dice-only variant is disabled).
    """
    loss2, loss3, loss4, loss5 = (bce_ssim_loss(out, labels_v) for out in (c2, c3, c4, c5))
    lossfy = bce_ssim_loss(fy, labels_v)
    total = loss2 + loss3 + loss4 + loss5 + lossfy
    print("l2: %3f, l3: %3f, l4: %3f, l5: %3f, lfy: %3f\n"%(loss2.data,loss3.data,loss4.data,loss5.data,lossfy.data))
    return total

def main():
    """Train PanModel on the table-mask dataset with the deep-supervision loss.

    Runs on CPU, saves a checkpoint to ./ckpt every 2 epochs.
    """
    data_fd = 'mask_label/aug'
    device = torch.device('cpu')  # CUDA path disabled in this script
    dataset = LoadTableImageAndLabels(data_fd)
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=0, shuffle=True, pin_memory=True)
    net = PanModel().to(device)
    epochs = 200
    dice = SoftDiceLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0001, weight_decay=5e-4)

    # Cosine LR factor for the disabled LambdaLR variant; kept but unused.
    lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.9 + 0.1  # cosine
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[3, 7, 13], gamma=0.1)

    for epoch in range(1, epochs):
        net.train()
        print('epoch: ', epoch)
        for step, (imgs, targets, mask) in enumerate(loader):
            imgs = imgs.to(device).float()
            c2, c3, c4, c5, fused = net(imgs)
            # Anomaly detection wraps only the loss computation here.
            with torch.autograd.set_detect_anomaly(True):
                loss = muti_bce_loss_fusion(c2, c3, c4, c5, fused, targets.to(device), dice)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        scheduler.step()
        if epoch and epoch % 2 == 0:
            torch.save(net.state_dict(), './ckpt/hard_mining_' + str(epoch) + '.pth')


# Script entry point: run training when invoked directly.
if __name__ == '__main__':
    main()
