from utils.tools import *
from network import *

import os
import torch
import torch.optim as optim
import torch.nn.functional as F
import time
import numpy as np

import kornia
from skimage import util
from skimage import filters
from torchvision.transforms.functional import rotate as rotate
from torchvision.transforms.functional import affine as affine
from torch.autograd import Variable
from itertools import combinations
import cv2

import antialiased_cnns

torch.multiprocessing.set_sharing_strategy('file_system')


# Deep Unsupervised Image Hashing by Maximizing Bit Entropy(AAAI2021)
# paper [Deep Unsupervised Image Hashing by Maximizing Bit Entropy](https://arxiv.org/pdf/2012.12334.pdf)
# code [Deep-Unsupervised-Image-Hashing](https://github.com/liyunqianggyn/Deep-Unsupervised-Image-Hashing)
# [BiHalf Unsupervised] epoch:40, bit:64, dataset:cifar10-2, MAP:0.593, Best MAP: 0.593
def get_config():
    """Build and return the experiment configuration dictionary.

    The base settings are filled in, then `config_dataset` augments them
    with dataset-specific entries, and finally the retrieval cutoff
    (`topK`) is set.
    """
    optimizer_cfg = {
        "type": optim.SGD,
        "epoch_lr_decrease": 30,
        # ori: "lr": 0.00001, "weight_decay": 5e-4, "momentum": 0.9
        "optim_params": {"lr": 0.00001, "weight_decay": 5e-4, "momentum": 0.9},
    }

    config = {
        "gamma": 6,
        "optimizer": optimizer_cfg,
        "info": "[BiHalf Unsupervised]",
        "resize_size": 256,  # 256
        "crop_size": 224,  # 224
        "batch_size": 64,
        "net": BiHalfModelUnsupervised,
        "dataset": "voc2012",  # cifar10-2 in paper BiHalf is "Cifar-10(I)"  "coco"
        "epoch": 200,
        "test_map": 20,
        # "device":torch.device("cpu"),
        "device": torch.device("cuda:0"),
        "bit_list": [8],
    }

    config = config_dataset(config)
    config["topK"] = 1000
    return config

# add attention
# channel attention
class ChannelAttention(nn.Module):
    """CBAM-style channel attention.

    Global average- and max-pooled descriptors are passed through a shared
    two-layer 1x1-conv bottleneck; the summed responses are squashed with a
    sigmoid to produce a per-channel gate of shape (N, C, 1, 1).

    Args:
        in_planes: number of input channels C.
        ratio: bottleneck reduction factor (C -> C // ratio -> C).
    """
    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # Bug fix: honor the `ratio` argument; the original hard-coded
        # `// 16` so any non-default ratio was silently ignored.
        # Default ratio=16 keeps behavior identical for existing callers.
        self.fc1   = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
        self.relu1 = nn.ReLU()
        self.fc2   = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)

        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return the channel attention map for x of shape (N, C, H, W)."""
        avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
        max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
        out = avg_out + max_out
        return self.sigmoid(out)
# spatial attention
class SpatialAttention(nn.Module):
    """CBAM-style spatial attention.

    Channel-wise mean and max maps are stacked into a 2-channel tensor,
    convolved down to a single channel, and squashed with a sigmoid to give
    a per-pixel gate of shape (N, 1, H, W).

    Args:
        kernel_size: size of the fusing convolution, either 3 or 7.
    """
    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()

        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        # "same" padding for an odd kernel: 3 for k=7, 1 for k=3.
        padding = (kernel_size - 1) // 2

        # add antialias to conv
        # self.conv1 = [nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False), antialiased_cnns.BlurPool(1, stride = 2)]
        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        # self.antialias = antialiased_cnns.BlurPool(1, stride = 2)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return the spatial attention map for x of shape (N, C, H, W)."""
        mean_map = torch.mean(x, dim=1, keepdim=True)
        max_map = torch.max(x, dim=1, keepdim=True)[0]
        stacked = torch.cat((mean_map, max_map), dim=1)
        # x = self.antialias(...)
        return self.sigmoid(self.conv1(stacked))

class BiHalfModelUnsupervised(nn.Module):
    """Unsupervised Bi-Half hashing network (AAAI 2021) with attention.

    Pipeline: frozen pretrained DenseNet-161 features -> spatial attention
    -> channel attention -> flatten -> two fully-connected layers producing
    a `bit`-dimensional real code `h`.

    In training mode `forward` returns (b, target_x, target_b, h) where `b`
    is the +/-1 code produced by the bi-half `Hash` op and target_x/target_b
    are pairwise cosine similarities between the two halves of the batch.
    In eval mode it returns the raw real-valued code `h`.

    NOTE(review): `Hash` reads the module-level globals `config["device"]`
    and `config["gamma"]`, so a `config` dict must exist at module scope
    before training — confirm against the caller.
    """
    def __init__(self, bit):
        # bit: length of the output hash code.
        super(BiHalfModelUnsupervised, self).__init__()
        # self.vgg = models.vgg16(pretrained=True)
        # self.vgg.classifier = nn.Sequential(*list(self.vgg.classifier.children())[:6])
        # # self.densenet.classifier = nn.Sequential(*list(self.densenet.classifier.children())[:])
        # for param in self.vgg.parameters():
        #     param.requires_grad = False
        # self.fc_encode = nn.Linear(4096, bit)

        # ori densenet -view
        # Frozen DenseNet-161 backbone: only the fc_encode* layers (and the
        # attention modules) are trained.
        self.densenet = models.densenet161(pretrained=True)
        for param in self.densenet.parameters():
            param.requires_grad = False
        # self.fc_encode1 = nn.Linear(108192, 4096)
        # self.fc_encode2 = nn.Linear(4096, 1000)
        # self.fc_encode3 = nn.Linear(1000, bit)   # ori Linear layer for densenet  -4096-1000-8
        # 108192 = flattened densenet161 feature map size for 224x224 input.
        self.fc_encode1 = nn.Linear(108192, 1000)  # 108192~224x224
        self.fc_encode2 = nn.Linear(1000, bit)   # -1000-8
        # self.fc_encode1 = nn.Linear(108192, 1000)
        # self.fc_encode2 = nn.Linear(1000, 500)
        # self.fc_encode3 = nn.Linear(500, bit)   # -1000-500-8


        # # new densenet
        # self.densenet = models.densenet161(pretrained=True)
        # for param in self.densenet.parameters():
        #     param.requires_grad = False
        # # self.fc_encode1 = nn.Linear(1000, 500)
        # # self.fc_encode2 = nn.Linear(500, bit)
        # self.fc_encode = nn.Linear(1000, bit)     # features-1000-8

        # self.efficientnet = models.efficientnet_b0(pretrained=True)
        # for param in self.efficientnet.parameters():
        #     param.requires_grad = False

        # self.resnet = models.resnet101(pretrained=True)
        # for param in self.resnet.parameters():
        #     param.requires_grad = False
        # self.fc_encode = nn.Linear(1000, 500)

        # # google net
        # self.inception_net = models.inception_v3(pretrained = True)
        # for param in self.inception_net.parameters():
        #     param.requires_grad = False
        # self.fc_encode = nn.Linear(1000, bit)
        # add attention
        # 2208 is the densenet161 feature channel count the channel
        # attention gates over.
        self.sa = SpatialAttention()
        self.ca = ChannelAttention(2208)

    class Hash(torch.autograd.Function):
        """Bi-half binarization: per bit, the top half of the batch maps to
        +1 and the bottom half to -1, with a proxy gradient in backward."""
        @staticmethod
        def forward(ctx, U):
            """Binarize U (N, D) column-wise into a balanced +/-1 code B."""
            # Yunqiang for half and half (optimal transport)
            _, index = U.sort(0, descending=True)
            N, D = U.shape
            B_creat = torch.cat((torch.ones([int(N / 2), D]), -torch.ones([N - int(N / 2), D]))).to(config["device"]) # config["device"]
            B = torch.zeros(U.shape).to(config["device"]).scatter_(0, index, B_creat)  # This operation can make the top int(N/2) values in B map to 1, and the last N-int(N/2) map to -1
            ctx.save_for_backward(U, B)
            return B

        @staticmethod
        def backward(ctx, g):
            """Pass the upstream grad through, plus gamma * (U - B) / numel
            pulling the real code toward its binarization."""
            U, B = ctx.saved_tensors
            add_g = (U - B) / (B.numel())
            grad = g + config["gamma"] * add_g
            return grad

    def forward(self, x):
        """Encode a batch of images.

        Args:
            x: image batch; presumably (N, 3, 224, 224) given the 108192
               flattened feature size — TODO confirm.

        Returns:
            Training: (b, target_x, target_b, h) — binary codes, cosine
            similarity of feature/code pairs (first half of batch vs second
            half; assumes even N), and the raw real-valued code.
            Eval: the raw real-valued code h only.
        """
        # x = self.vgg.features(x)
        # # print(x.shape)
        # x = x.view(x.size(0), -1)
        # # print(x.shape)
        # x = self.vgg.classifier(x)
        # # print(x.shape)
        # h = self.fc_encode(x)

        # # x = self.efficientnet(x)
        # x = self.resnet(x)
        # # print(x.shape)
        # # x = x.view(x.size(0), -1)
        # # x = self.efficientnet.classifier(x)
        # # print(x.shape)
        # x = self.fc_encode1(x)
        # h = self.fc_encode2(x)


        # x = self.resnet(x)
        # x = x.view(x.size(0), -1)
        # print(x.shape)
        # h = self.fc_encode(x)

        
        # # add noise before conv
        # shape = x.shape
        # noise = (0.001**0.5)*torch.randn(shape)
        # x = x + noise.cuda()

        
        # ori densenet: densenet-view
        x = self.densenet.features(x)
        # change the location
        
        # add spatial attention 
        x = self.sa(x) * x
        # add channel attention
        x = self.ca(x) * x
        
        # print(x.shape)
        x = x.view(x.size(0), -1)
        # x = self.densenet.classifier(x)
        # print(x.shape)
        # x = self.fc_encode1(x)
        # x = self.fc_encode2(x)
        # h = self.fc_encode3(x)   # ori Linear layer for densenet, -4096-1000-8
        x = self.fc_encode1(x)
        h = self.fc_encode2(x)   # -1000-8
        # x = self.fc_encode1(x)
        # x = self.fc_encode2(x)
        # h = self.fc_encode3(x)    # -1000-500-8

        # # new densenet
        # x = self.densenet(x)
        # # print(x.shape)
        # # x = self.fc_encode1(x)
        # # h = self.fc_encode2(x)
        # h = self.fc_encode(x)    # features-1000-8

        # # google net
        # if self.training:
        #     x, _ = self.inception_net(x)
        # else:
        #     x = self.inception_net(x)
        # h = self.fc_encode(x)

        
        if not self.training:
            # mean = torch.mean(h)
            # h[h > mean] = 1
            # h[h <= mean] = -1
            return h
            # return h.sign()   # try another quantification method, compare with the average of h
            # b = BiHalfModelUnsupervised.Hash.apply(h)
            # return b
        else:
            b = BiHalfModelUnsupervised.Hash.apply(h)
            # print('size',b.size())
            # Pair the first half of the batch with the second half and
            # compare similarity structure in code space vs feature space.
            target_b = F.cosine_similarity(b[:x.size(0) // 2], b[x.size(0) // 2:])
            target_x = F.cosine_similarity(x[:x.size(0) // 2], x[x.size(0) // 2:])
            # loss = F.mse_loss(target_b, target_x)
            # return loss
            return b, target_x, target_b, h

# # add discriminator to judge noised hash and ori
# class Discriminator(nn.Module):
#     def __init__(self):
#         super(Discriminator, self).__init__()

#         self.model = nn.Sequential(
#             nn.Linear(bit, 512),
#             nn.LeakyReLU(0.2, inplace=True),
#             nn.Linear(512, 256),
#             nn.LeakyReLU(0.2, inplace=True),
#             nn.Linear(256, 1),
#             nn.Sigmoid(),
#             )
#         # self.model = nn.Sequential(
#         #     nn.Linear(bit, 4),
#         #     nn.LeakyReLU(0.2, inplace=True),
#         #     nn.Linear(4, 2),
#         #     nn.LeakyReLU(0.2, inplace=True),
#         #     nn.Linear(2, 1),
#         #     nn.Sigmoid(),
#         #     )

#     def forward(self, hash):
#         validity = self.model(hash)

#         return validity

# add adversarial training, todo 1
def kl(inputs, targets, reduction="sum"):
    """Compute the KL divergence between two logit tensors.

    Args:
        inputs: logits; converted to log-probabilities via log_softmax.
        targets: logits; converted to probabilities via softmax.
        reduction: reduction mode forwarded to F.kl_div.

    Returns:
        KL(targets || inputs) over the last dimension, reduced as requested.
    """
    log_p = F.log_softmax(inputs, dim=-1)
    q = F.softmax(targets, dim=-1)
    return F.kl_div(log_p, q, reduction=reduction)
def adv_project(grad, norm_type='inf', eps=1e-6):
    """Normalize a gradient into a perturbation direction.

    Args:
        grad: gradient tensor.
        norm_type: 'l2' (divide by last-dim L2 norm), 'l1' (sign), or
            anything else for the default infinity-norm scaling.
        eps: small constant to avoid division by zero.

    Returns:
        The direction tensor, same shape as `grad`.
    """
    if norm_type == 'l1':
        return grad.sign()
    if norm_type == 'l2':
        return grad / (torch.norm(grad, dim=-1, keepdim=True) + eps)
    # default: scale by the largest absolute entry along the last dim
    return grad / (grad.abs().max(-1, keepdim=True)[0] + eps)

def virtual_adversarial_training(model, image, device):
    """One step of virtual adversarial training (VAT).

    A random Gaussian perturbation is refined by one gradient ascent step on
    the KL divergence between the model's codes for clean and perturbed
    input, projected to a unit L2 direction, and re-applied to the clean
    image; the returned loss is the KL between the clean codes and the codes
    of that adversarially perturbed image.

    Args:
        model: network whose forward returns (b, target_x, target_b, h).
        image: input image batch (detached copies are perturbed).
        device: device the perturbed images should live on.

    Returns:
        The adversarial KL loss tensor, or None if the perturbation
        gradient norm is NaN/Inf.
    """
    # noise = image.data.new(image.size()).normal_(0, 1) * 1e-4  # 1e-5
    # modify the generation of noise
    # Gaussian noise with variance 0.01; requires grad so we can take
    # d(adv_loss)/d(noise).
    noise = (0.01**0.5)*torch.randn(image.shape)
    noise.requires_grad_()
    image_noise = image.data.detach() + noise.to(device)
    b, target_x, target_b, h = model(image)
    b_noise2, target_x_noise2, target_b_noise2, h_noise2 = model(image_noise)
    # ours 2022.9.13
    # adv_loss = F.mse_loss(b, b_noise2)  # F.mse_loss(b, b_noise2)
    adv_loss = kl(b, b_noise2, reduction="batchmean")
    delta_grad, = torch.autograd.grad(adv_loss, noise, only_inputs = True)
    # print('delta_grad:', delta_grad)
    norm = delta_grad.norm()
    if torch.isnan(norm) or torch.isinf(norm):
        return None
    # Ascend the KL surface, then project to a unit L2 direction.
    noise = noise + delta_grad * 1e-2   # 1e-3
    noise = adv_project(noise, norm_type = 'l2', eps = 1e-6)
    # Bug fix: perturb the clean image with the refined noise. The original
    # added `image_noise` (which already contains the image) to the image,
    # doubling the image signal and discarding the projected perturbation.
    new_image = image.data.detach() + noise.to(device)
    new_image = new_image.detach()
    # train again
    b_new, target_x_new, target_b_new, h_new = model(new_image)
    # ours 2022.9.13
    # adv_loss = F.mse_loss(b, b_new)    # F.mse_loss(b, b_new)
    adv_loss = kl(b, b_new)
    return adv_loss



def train_val(config, bit):
    device = config["device"]
    train_loader, test_loader, dataset_loader, num_train, num_test, num_dataset = get_data(config)
    config["num_train"] = num_train
    net = config["net"](bit).to(device)

    # # add discriminator
    # cuda = True if torch.cuda.is_available() else False
    # discriminator = Discriminator().to(device)

    optimizer = config["optimizer"]["type"](net.parameters(), **(config["optimizer"]["optim_params"]))

    # # add discriminator
    # optimizer_D = config["optimizer"]["type"](discriminator.parameters(), **(config["optimizer"]["optim_params"]))
    # # Loss function
    # adversarial_loss = torch.nn.BCELoss()
    # Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

    Best_mAP = 0

    # # add attacks' parameters
    # noise_density = 0.01
    # filter_kernal = [3, 3]
    # rotation_angle = 10
    # translation = (5, 5)
    # print('noise_density: %.2f, filter_kernal: %d, rotation_angle: %d, translation: (%d, %d)' % (noise_density, filter_kernal[0], rotation_angle, translation[0], translation[1]))

    for epoch in range(config["epoch"]):

        lr = config["optimizer"]["optim_params"]["lr"] * (0.1 ** (epoch // config["optimizer"]["epoch_lr_decrease"]))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        current_time = time.strftime('%H:%M:%S', time.localtime(time.time()))

        print("%s[%2d/%2d][%s] bit:%d, lr:%.9f, dataset:%s, training...." % (
            config["info"], epoch + 1, config["epoch"], current_time, bit, lr, config["dataset"]), end="")

        net.train()

        # # add discriminator loss
        # discriminator.train()

        train_loss = 0
        count = 0
        loss_dif = 0
        if "coco" in config["dataset"]:
            for image, ind in train_loader:   # _ means that no usage of hot-representation 
                # make sure F.cosine_similarity can be run, modify to split: image = image[:, -8:, :, :]
                if (image.size(0) % 2 ) == 0:
                    image = image.to(device)

                    # # add adversaral loss
                    # # Adversarial ground truths
                    # valid = Variable(Tensor(image.size(0), 1).fill_(1.0), requires_grad=False)
                    # fake = Variable(Tensor(image.size(0), 1).fill_(0.0), requires_grad=False)

                    # only extract low frequency part of the image, 1. add pre-process
                    # gauss = kornia.filters.GaussianBlur2d([1, 1], [1, 1])  # GaussianBlur2d [3, 3], [1, 1]
                    # image = gauss(image)


                    # # transform image to frequency domain
                    # # print(image.shape) # [64, 3, 224, 224]
                    # dct_tensor = torch.zeros(image.shape).cuda()
                    # for i in range(image.shape[0]):
                    #     # print('image[i]:', image[i].shape)
                    #     image_numpy = np.float32(image[i].numpy())
                    #     # print(image_numpy.shape) # (3,224,224), need to transform to (224, 224, 3)
                    #     b = image_numpy[0, :, :]
                    #     g = image_numpy[1, :, :]
                    #     r = image_numpy[2, :, :]
                    #     # b, g, r = cv2.split(image_numpy)
                    #     b_dct = cv2.dct(b)
                    #     g_dct = cv2.dct(g)
                    #     r_dct = cv2.dct(r)
                    #     dct1 = cv2.merge([b_dct, g_dct, r_dct])
                    #     # print('dct1', dct1.shape) # (224,224,3)
                    #     dct1_tensor = torch.tensor(dct1, dtype=float).to(device)
                    #     dct1_tensor_permute = dct1_tensor.permute(2,0,1)
                    #     dct_tensor[i] = torch.unsqueeze(dct1_tensor_permute, 0)
                    # # print('dct_tensor', dct_tensor.shape)

                    # print(image.shape)
                    # count += 1

                    # # add noise
                    # shape = image.shape
                    # noise = (0.01**0.5)*torch.randn(shape)
                    # image_noise1 = image + noise.to(device)

                    # add filter   to do: try to change noise as a variable; try to modify loss; try  to modify threshold to the average
                    gauss = kornia.filters.GaussianBlur2d([3, 3], [1, 1])  # GaussianBlur2d [3, 3], [1, 1]
                    image_noise1 = gauss(image)

                    # add rotation
                    image_noise2 = rotate(image, 10)

                    # add filter cv2
                    # image_noise2 = torch.zeros(image.shape).cuda()
                    # for i in range(image.shape[0]):
                    #     image_numpy = image[i].cpu().numpy()
                    #     # print('image_numpy:', image_numpy)
                    #     image_noise_numpy = cv2.GaussianBlur(image_numpy, (3, 3), 0)
                    #     image_noise_tensor = torch.tensor(image_noise_numpy).to(device)
                    #     image_noise2[i] = torch.unsqueeze(image_noise_tensor, 0)
                    # print('image:', image)
                    # print('image_noise2:', image_noise2)

                    # # transform image to frequency domain
                    # dct_tensor_noise = torch.zeros(image_noise2.shape).cuda()
                    # for j in range(image_noise2.shape[0]):
                    #     image_noise2_numpy = np.float32(image_noise2[i].numpy())
                    #     # b_noise, g_noise, r_noise = cv2.split(image_noise2_numpy)
                    #     b_noise = image_noise2_numpy[0, :, :]
                    #     g_noise = image_noise2_numpy[1, :, :]
                    #     r_noise = image_noise2_numpy[2, :, :]
                    #     b_noise_dct = cv2.dct(b_noise)
                    #     g_noise_dct = cv2.dct(g_noise)
                    #     r_noise_dct = cv2.dct(r_noise)
                    #     dct2 = cv2.merge([b_noise_dct, g_noise_dct, r_noise_dct])
                    #     dct2_tensor = torch.tensor(dct2, dtype=float).to(device)
                    #     dct2_tensor_permute = dct2_tensor.permute(2,0,1)
                    #     dct_tensor_noise[i] = torch.unsqueeze(dct2_tensor_permute, 0)

                    # # add all kinds of noise one by one, each batch
                    # if count % 4 == 0:
                    #     shape = image.shape
                    #     noise = (noise_density**0.5)*torch.randn(shape)
                    #     image_noise = image + noise.to(device)
                    # elif count % 4 == 1:
                    #     gauss = kornia.filters.GaussianBlur2d(filter_kernal, [1, 1])
                    #     image_noise = gauss(image)
                    # elif count % 4 == 2:
                    #     image_noise = rotate(image, rotation_angle)
                    # else:
                    #     image_noise = affine(image, translate = translation, angle = 0, scale = 1, shear = 0)
                    # count += 1

                    # Train Generator
                    optimizer.zero_grad()

                    # adversal training
                    # b, target_x, target_b, h = net(image)
                    # loss = F.mse_loss(target_b, target_x)
                    adv_loss = virtual_adversarial_training(net, image, device)
                    # loss = adv_loss * 20 + loss   # 10

                    # loss = net(image)

                    # modify noise loss
                    b, target_x, target_b, h = net(image)
                    # print("target_b:",target_b.shape)
                    b_noise1, target_x_noise1, target_b_noise1, h_noise1 = net(image_noise1)
                    b_noise2, target_x_noise2, target_b_noise2, h_noise2 = net(image_noise2)

                    # # add adversarial loss, ori + generator adversarial loss
                    # g_loss = F.mse_loss(target_b, target_x) + 2 * adversarial_loss(discriminator(Variable(Tensor(b_noise2)).detach()), valid)
                    # g_loss.backward()
                    # optimizer.step()

                    # b, target_x, target_b, h = net(dct_tensor)
                    # print('target_b', target_b)
                    # print('b',b)
                    # print('0:',b[0])
                    # print('1:',b[1])
                    # print('63:',b[63])

                    # # create combinations
                    # Com_list = list(combinations([i for i in range(b.shape[0])], 2))
                    # # print(Com_list)
                    # for i in range(len(Com_list)):
                    #     u = Com_list[i][0]
                    #     v = Com_list[i][1]
                    #     # print('u', b[u])
                    #     # print('v', b[v])
                    #     loss_dif -= F.mse_loss(b[u], b[v])
                    #     print(loss_dif)
                    # # normalization
                    # loss_dif = loss_dif / len(Com_list)
                    # for i in range(h.shape[0]):
                    #     loss_dif -= F.mse_loss(h[0], h[i])
                    #     # print(loss_dif)
                    # loss_dif = loss_dif / h.shape[0]
                    # # print('dif',loss_dif)
                    # b_noise1, target_x_noise1, target_b_noise1, h_noise1 = net(image_noise1)
                    # loss = F.mse_loss(b, b_noise)
                    # print('sim',loss.item())

                    # add adversarial loss
                    # # Train discriminator
                    # optimizer_D.zero_grad()
                    # real_loss = adversarial_loss(discriminator(Variable(Tensor(b)).detach()), valid)
                    # fake_loss = adversarial_loss(discriminator(Variable(Tensor(b_noise2)).detach()), fake)
                    # d_loss = (real_loss + fake_loss) / 2
                    # d_loss.backward()
                    # optimizer_D.step()
                    
                    # b_noise2, target_x_noise2, target_b_noise2, h_noise2 = net(dct_tensor_noise)

                    # addnoise-loss0.5
                    # loss = F.mse_loss(target_b, target_x) + 0.2 * F.mse_loss(b, b_noise)

                    # # addfilter-loss0.5 best, 2022.6.20   
                    # loss = F.mse_loss(target_b, target_x) + 0.5 * F.mse_loss(b, b_noise2)
                    # 2022.7.23 best add spatial+channel attention + addfilter-loss1.0,  !!Note that addfilter-loss means F.mse(b, b_noise2) while addfilter target means F.mse_loss(target_b_noise2, target_x_noise2)!!
                    loss = F.mse_loss(target_b, target_x) + 0.7 * F.mse_loss(b, b_noise2) + 0.3 * F.mse_loss(b, b_noise1) + 1.2 * adv_loss
                    # loss = F.kl_div(target_b.softmax(dim=-1).log(), target_x.softmax(dim=-1), reduction = 'sum') + 1.0 * F.kl_div(b.softmax(dim=-1).log(), b_noise2.softmax(dim=-1), reduction = 'sum')
                    # loss = F.mse_loss(target_b, target_x) + 1.0 * F.kl_div(b.softmax(dim=-1).log(), b_noise2.softmax(dim=-1), reduction = 'sum')

                    # addfilter target 1:1 best, 2022.6.28   loss = F.mse_loss(target_b, target_x) + 1.0 * F.mse_loss(target_b_noise2, target_x_noise2)
                    # loss = F.mse_loss(target_b, target_x) + 1.5 * F.mse_loss(target_b_noise2, target_x_noise2)

                    # try to modify loss, change all of the mse_loss to kl
                    # add kl loss
                    # kl = F.kl_div(h.softmax(dim=-1).log(), h_noise2.softmax(dim=-1), reduction = 'sum')
                    # loss = F.mse_loss(target_b, target_x) + 1.0 * F.mse_loss(target_b_noise2, target_x_noise2) + 0.1 * kl

                    # # add noise loss
                    # loss_noise = net(image_noise)
                    # loss = (loss + loss_noise) /2

                    train_loss += loss.item()

                    # train_loss += loss_dif

                    loss.backward()
                    optimizer.step()
                # print('imagesNum:', count)
        else:
            for image, _, ind in train_loader:   # _ means that no usage of hot-representation 
                # make sure F.cosine_similarity can be run, modify to split: image = image[:, -8:, :, :]
                if (image.size(0) % 2 ) == 0:
                    image = image.to(device)

                    # # add adversaral loss
                    # # Adversarial ground truths
                    # valid = Variable(Tensor(image.size(0), 1).fill_(1.0), requires_grad=False)
                    # fake = Variable(Tensor(image.size(0), 1).fill_(0.0), requires_grad=False)

                    # only extract low frequency part of the image, 1. add pre-process
                    # gauss = kornia.filters.GaussianBlur2d([1, 1], [1, 1])  # GaussianBlur2d [3, 3], [1, 1]
                    # image = gauss(image)


                    # # transform image to frequency domain
                    # # print(image.shape) # [64, 3, 224, 224]
                    # dct_tensor = torch.zeros(image.shape).cuda()
                    # for i in range(image.shape[0]):
                    #     # print('image[i]:', image[i].shape)
                    #     image_numpy = np.float32(image[i].numpy())
                    #     # print(image_numpy.shape) # (3,224,224), need to transform to (224, 224, 3)
                    #     b = image_numpy[0, :, :]
                    #     g = image_numpy[1, :, :]
                    #     r = image_numpy[2, :, :]
                    #     # b, g, r = cv2.split(image_numpy)
                    #     b_dct = cv2.dct(b)
                    #     g_dct = cv2.dct(g)
                    #     r_dct = cv2.dct(r)
                    #     dct1 = cv2.merge([b_dct, g_dct, r_dct])
                    #     # print('dct1', dct1.shape) # (224,224,3)
                    #     dct1_tensor = torch.tensor(dct1, dtype=float).to(device)
                    #     dct1_tensor_permute = dct1_tensor.permute(2,0,1)
                    #     dct_tensor[i] = torch.unsqueeze(dct1_tensor_permute, 0)
                    # # print('dct_tensor', dct_tensor.shape)

                    # print(image.shape)
                    # count += 1

                    # # add noise
                    # shape = image.shape
                    # noise = (0.01**0.5)*torch.randn(shape)
                    # image_noise1 = image + noise.to(device)

                    # add filter   to do: try to change noise as a variable; try to modify loss; try  to modify threshold to the average
                    gauss = kornia.filters.GaussianBlur2d([3, 3], [1, 1])  # GaussianBlur2d [3, 3], [1, 1]
                    image_noise1 = gauss(image)

                    # add rotation
                    image_noise2 = rotate(image, 10)

                    # add filter cv2
                    # image_noise2 = torch.zeros(image.shape).cuda()
                    # for i in range(image.shape[0]):
                    #     image_numpy = image[i].cpu().numpy()
                    #     # print('image_numpy:', image_numpy)
                    #     image_noise_numpy = cv2.GaussianBlur(image_numpy, (3, 3), 0)
                    #     image_noise_tensor = torch.tensor(image_noise_numpy).to(device)
                    #     image_noise2[i] = torch.unsqueeze(image_noise_tensor, 0)
                    # print('image:', image)
                    # print('image_noise2:', image_noise2)

                    # # transform image to frequency domain
                    # dct_tensor_noise = torch.zeros(image_noise2.shape).cuda()
                    # for j in range(image_noise2.shape[0]):
                    #     image_noise2_numpy = np.float32(image_noise2[i].numpy())
                    #     # b_noise, g_noise, r_noise = cv2.split(image_noise2_numpy)
                    #     b_noise = image_noise2_numpy[0, :, :]
                    #     g_noise = image_noise2_numpy[1, :, :]
                    #     r_noise = image_noise2_numpy[2, :, :]
                    #     b_noise_dct = cv2.dct(b_noise)
                    #     g_noise_dct = cv2.dct(g_noise)
                    #     r_noise_dct = cv2.dct(r_noise)
                    #     dct2 = cv2.merge([b_noise_dct, g_noise_dct, r_noise_dct])
                    #     dct2_tensor = torch.tensor(dct2, dtype=float).to(device)
                    #     dct2_tensor_permute = dct2_tensor.permute(2,0,1)
                    #     dct_tensor_noise[i] = torch.unsqueeze(dct2_tensor_permute, 0)

                    # # add all kinds of noise one by one, each batch
                    # if count % 4 == 0:
                    #     shape = image.shape
                    #     noise = (noise_density**0.5)*torch.randn(shape)
                    #     image_noise = image + noise.to(device)
                    # elif count % 4 == 1:
                    #     gauss = kornia.filters.GaussianBlur2d(filter_kernal, [1, 1])
                    #     image_noise = gauss(image)
                    # elif count % 4 == 2:
                    #     image_noise = rotate(image, rotation_angle)
                    # else:
                    #     image_noise = affine(image, translate = translation, angle = 0, scale = 1, shear = 0)
                    # count += 1

                    # Train Generator
                    optimizer.zero_grad()

                    # adversal training
                    # b, target_x, target_b, h = net(image)
                    # loss = F.mse_loss(target_b, target_x)
                    adv_loss = virtual_adversarial_training(net, image, device)
                    # loss = adv_loss * 20 + loss   # 10

                    # loss = net(image)

                    # modify noise loss
                    b, target_x, target_b, h = net(image)
                    # print("target_b:",target_b.shape)
                    b_noise1, target_x_noise1, target_b_noise1, h_noise1 = net(image_noise1)
                    b_noise2, target_x_noise2, target_b_noise2, h_noise2 = net(image_noise2)

                    # # add adversarial loss, ori + generator adversarial loss
                    # g_loss = F.mse_loss(target_b, target_x) + 2 * adversarial_loss(discriminator(Variable(Tensor(b_noise2)).detach()), valid)
                    # g_loss.backward()
                    # optimizer.step()

                    # b, target_x, target_b, h = net(dct_tensor)
                    # print('target_b', target_b)
                    # print('b',b)
                    # print('0:',b[0])
                    # print('1:',b[1])
                    # print('63:',b[63])

                    # # create combinations
                    # Com_list = list(combinations([i for i in range(b.shape[0])], 2))
                    # # print(Com_list)
                    # for i in range(len(Com_list)):
                    #     u = Com_list[i][0]
                    #     v = Com_list[i][1]
                    #     # print('u', b[u])
                    #     # print('v', b[v])
                    #     loss_dif -= F.mse_loss(b[u], b[v])
                    #     print(loss_dif)
                    # # normalization
                    # loss_dif = loss_dif / len(Com_list)
                    # for i in range(h.shape[0]):
                    #     loss_dif -= F.mse_loss(h[0], h[i])
                    #     # print(loss_dif)
                    # loss_dif = loss_dif / h.shape[0]
                    # # print('dif',loss_dif)
                    # b_noise1, target_x_noise1, target_b_noise1, h_noise1 = net(image_noise1)
                    # loss = F.mse_loss(b, b_noise)
                    # print('sim',loss.item())

                    # add adversarial loss
                    # # Train discriminator
                    # optimizer_D.zero_grad()
                    # real_loss = adversarial_loss(discriminator(Variable(Tensor(b)).detach()), valid)
                    # fake_loss = adversarial_loss(discriminator(Variable(Tensor(b_noise2)).detach()), fake)
                    # d_loss = (real_loss + fake_loss) / 2
                    # d_loss.backward()
                    # optimizer_D.step()
                    
                    # b_noise2, target_x_noise2, target_b_noise2, h_noise2 = net(dct_tensor_noise)

                    # addnoise-loss0.5
                    # loss = F.mse_loss(target_b, target_x) + 0.2 * F.mse_loss(b, b_noise)

                    # # addfilter-loss0.5 best, 2022.6.20   
                    # loss = F.mse_loss(target_b, target_x) + 0.5 * F.mse_loss(b, b_noise2)
                    # 2022.7.23 best add spatial+channel attention + addfilter-loss1.0,  !!Note that addfilter-loss means F.mse(b, b_noise2) while addfilter target means F.mse_loss(target_b_noise2, target_x_noise2)!!
                    # Active objective: BiHalf reconstruction term F.mse_loss(target_b, target_x)
                    # plus two weighted consistency terms pulling the codes of augmented views
                    # (b_noise2, b_noise1 — presumably produced earlier in the loop from a
                    # filtered and a rotated view respectively, per the checkpoint-path name
                    # below; TODO confirm against the unseen top of this loop) toward the
                    # clean code b, plus a 1.2-weighted adversarial term adv_loss.
                    loss = F.mse_loss(target_b, target_x) + 0.7 * F.mse_loss(b, b_noise2) + 0.3 * F.mse_loss(b, b_noise1) + 1.2 * adv_loss
                    # loss = F.kl_div(target_b.softmax(dim=-1).log(), target_x.softmax(dim=-1), reduction = 'sum') + 1.0 * F.kl_div(b.softmax(dim=-1).log(), b_noise2.softmax(dim=-1), reduction = 'sum')
                    # loss = F.mse_loss(target_b, target_x) + 1.0 * F.kl_div(b.softmax(dim=-1).log(), b_noise2.softmax(dim=-1), reduction = 'sum')

                    # addfilter target 1:1 best, 2022.6.28   loss = F.mse_loss(target_b, target_x) + 1.0 * F.mse_loss(target_b_noise2, target_x_noise2)
                    # loss = F.mse_loss(target_b, target_x) + 1.5 * F.mse_loss(target_b_noise2, target_x_noise2)

                    # try to modify loss, change all of the mse_loss to kl
                    # add kl loss
                    # kl = F.kl_div(h.softmax(dim=-1).log(), h_noise2.softmax(dim=-1), reduction = 'sum')
                    # loss = F.mse_loss(target_b, target_x) + 1.0 * F.mse_loss(target_b_noise2, target_x_noise2) + 0.1 * kl

                    # # add noise loss
                    # loss_noise = net(image_noise)
                    # loss = (loss + loss_noise) /2

                    # Accumulate the scalar batch loss for the epoch-level average below.
                    train_loss += loss.item()

                    # train_loss += loss_dif

                    # Backprop and parameter update; optimizer.zero_grad() is assumed to
                    # be called earlier in this (unseen) batch loop — TODO confirm.
                    loss.backward()
                    optimizer.step()
                # print('imagesNum:', count)

        # Mean loss over all batches of the epoch (len(train_loader) == #batches).
        train_loss = train_loss / len(train_loader)

        print("\b\b\b\b\b\b\b loss:%.9f" % (train_loss))

        # print(
        #     "[Epoch %d/%d] [D loss: %f] [G loss: %f]"
        #     % (epoch, config["epoch"], d_loss.item(), g_loss.item())
        # )
        # print(config)

        # for name, parms in net.named_parameters():
        #     print('-->name:', name, '-->grad_requirs:', parms.requires_grad)

        # if (epoch + 1) % config["test_map"] == 0:
        #     # print("calculating test binary code......")
        #     tst_binary, tst_label = compute_result(test_loader, net, device=device)

        #     # print("calculating dataset binary code.......")\
        #     trn_binary, trn_label = compute_result(dataset_loader, net, device=device)

        #     # print("calculating map.......")
        #     mAP = CalcTopMap(trn_binary.numpy(), tst_binary.numpy(), trn_label.numpy(), tst_label.numpy(),
        #                      config["topK"])

        #     if mAP > Best_mAP:
        #         Best_mAP = mAP

        #         if "save_path" in config:
        #             if not os.path.exists(config["save_path"]):
        #                 os.makedirs(config["save_path"])
        #             print("save in ", config["save_path"])
        #             np.save(os.path.join(config["save_path"], config["dataset"] + str(mAP) + "-" + "trn_binary.npy"),
        #                     trn_binary.numpy())
        #             torch.save(net.state_dict(),
        #                        os.path.join(config["save_path"], config["dataset"] + "-" + str(mAP) + "-model.pt"))
        #     print("%s epoch:%d, bit:%d, dataset:%s, MAP:%.3f, Best MAP: %.3f" % (
        #         config["info"], epoch + 1, bit, config["dataset"], mAP, Best_mAP))
        #     print(config)

        # save model
        # path_to_outputs_dir = './outputs'
        # Checkpoint the network weights every 100 epochs.
        # NOTE(review): torch.save does not create directories — this hard-coded
        # output path must already exist or the save raises; consider os.makedirs.
        if (epoch + 1) % 100 == 0:
            # path_to_checkpoints_dir = os.path.join(path_to_outputs_dir, 'checkpoints-{:s}'.format(epoch+1))
            torch.save(net.state_dict(), './our_output/BiHalf_voc2012_all_train+val_densenet_spatial+channel_attention_afterfeatures_addfilter+rotation-loss0.3-0.7+advloss1.2-kl_noise0.01_1000-8_output/8bits/ep%03d.pth' % (epoch + 1))


if __name__ == "__main__":
    # Entry point: build the experiment configuration, echo it for the run log,
    # then train one model per requested hash-code length in config["bit_list"].
    config = get_config()
    print(config)
    for bit in config["bit_list"]:
        train_val(config, bit)
