import torch
import torchvision.models as models
import torch.optim as optim
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.datasets as torchdata
import torch.utils.data as Data
from collections import namedtuple
import os
from Retrieval_tools2 import *
from Retrieval_utils import *
os.environ["CUDA_VISIBLE_DEVICES"] = "2"


import argparse

# Command-line configuration for this retrieval/hashing training run.
parser = argparse.ArgumentParser(description='BlockDrop Training')
# parser.add_argument('--lr_GoogLeNet', type=float, default=1e-4, help='learning rate')
# parser.add_argument('--lr_rest', type=float, default=2*1e-4, help='learning rate')
# Learning rate for featureExtractor_1 (presumably the GoogLeNet backbone, per the flag name).
parser.add_argument('--lr_GoogLeNet', type=float, default=1e-4, help='learning rate')
# Learning rate for the remaining layers (featureExtractor_2 and hashNet).
parser.add_argument('--lr_rest', type=float, default=1e-3, help='learning rate')
# parser.add_argument('--lr_decay_ratio', type=float, default=0.9, help='lr *= lr_decay_ratio after epoch_steps')
parser.add_argument('--weight_decay', type=float, default=0.0005, help='weight decay')
parser.add_argument('--batch_size', type=int, default=16, help='batch size')
parser.add_argument('--max_epochs', type=int, default=500, help='total epochs to run')
# parser.add_argument('--epoch_step', type=int, default=20, help='epochs after which lr is decayed')
parser.add_argument('--cv_dir', default='../shangming_Retrieval_4', help='checkpoint directory (models and logs are saved here)')
parser.add_argument('--hash_length', type=int, default=48, help='hash code length')
parser.add_argument('--margin', type=float, default=16, help='margin of triplet loss')
# ------ RL part --------------------------------------------------------------------
# -----------------------------------------------------------------------------------
args = parser.parse_args()

# define the Pytorch Tensor
# Tensor-type aliases: CUDA variants when a GPU is available, CPU otherwise.
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor  # default float tensor alias for downstream code

def pre_train_hash(epoch):
    """Run one training epoch over `trainval_loader`.

    Jointly optimizes featureExtractor_1, featureExtractor_2 and hashNet with
    a multi-label triplet loss plus a multi-label cross-entropy loss, then
    writes the accumulated epoch loss to `<cv_dir>/<epoch>.txt`.

    Args:
        epoch: current epoch index, used only for logging.
    """
    print('epoch = ', epoch)
    featureExtractor_1.train()
    featureExtractor_2.train()
    hashNet.train()

    accum_loss = 0
    for batch_idx, (images, labels) in enumerate(trainval_loader):

        # A batch of size 1 breaks BatchNorm statistics in train mode; skip it.
        if images.shape[0] == 1:
            continue

        # torch.autograd.Variable is a no-op since PyTorch 0.4 (tensors carry
        # autograd state), so move straight to the GPU instead of wrapping.
        images, labels = images.cuda(), labels.cuda()
        # Call the modules directly (rather than .forward()) so hooks run.
        imageFeatures_1 = featureExtractor_1(images)         # e.g. [B, 1024, 14, 14] — per original notes
        imageFeatures = featureExtractor_2(imageFeatures_1)  # e.g. [B, 480, 14, 14] — per original notes
        hashFeatures, label_pre = hashNet(imageFeatures)

        triplet_loss, len_triplets = multi_triplet_loss.forward(hashFeatures, labels)
        entropy_loss = MultiLabel_CrossEntropyLoss(label_pre, labels)

        # Only step when the batch yielded at least one valid triplet;
        # otherwise the triplet term is meaningless and the whole update is
        # skipped (matching the original training schedule).
        if len_triplets != 0:
            loss = triplet_loss + entropy_loss
            optimizer_image.zero_grad()
            loss.backward()
            optimizer_image.step()
            accum_loss += loss.item()

    print("epoch: %d, accum_loss: %.6f " % (epoch, accum_loss))
    # Write the epoch summary as plain text. The previous code used
    # torch.save() on a str, which pickles it and produces an unreadable
    # binary file despite the ".txt" extension.
    with open(args.cv_dir + '/' + str(epoch) + '.txt', 'w') as f:
        f.write('epoch = ' + str(epoch) + ',  accum_loss = ' + str(accum_loss))
    

def test_hash(epoch):
    """Evaluate retrieval mAP for the current networks and checkpoint them.

    Computes binary codes for the query (test) and database splits, scores
    multi-label retrieval mAP, saves each network's state_dict tagged with
    the epoch and mAP, and returns the time spent encoding the test set.
    """
    # Evaluation mode for every network (freezes BN stats / dropout).
    for net in (featureExtractor_1, featureExtractor_2, hashNet):
        net.eval()

    # Hash codes and labels for query and database images.
    tst_binary, tst_label, tst_time = compute_result_image(
        test_loader, featureExtractor_1, featureExtractor_2, hashNet)
    db_binary, db_label, db_time = compute_result_image(
        db_loader, featureExtractor_1, featureExtractor_2, hashNet)

    mAP = compute_mAP_MultiLabels(db_binary, tst_binary, db_label, tst_label)
    print("epoch: %d, retrieval mAP: %.6f" %(epoch, mAP))

    # Checkpoint all three networks with a shared epoch/mAP-tagged prefix.
    prefix = args.cv_dir + '/ckpt_E_%d_mAP_%.5f_' % (epoch, mAP)
    torch.save(featureExtractor_1.state_dict(), prefix + 'featureExtractor_1.t7')
    torch.save(featureExtractor_2.state_dict(), prefix + 'featureExtractor_2.t7')
    torch.save(hashNet.state_dict(), prefix + 'hashNet.t7')

    return tst_time



# Training and test data loaders after preprocessing (resize, normalisation, etc.).
print('load data !')
# trainval_loader, test_loader, compute_test_loader, compute_trainval_loader = init_voc_dataloader(args)
trainval_loader, test_loader, db_loader = init_voc_dataloader_mirror(args)

# Multi-label triplet loss with the margin configured via --margin.
# online_triplet_loss = OnlineTripletLoss(args.margin)
multi_triplet_loss = MultiLabelTripletLoss(args.margin)

# load models
print('load featureExtractor and hashNet !')

# ---------------------------------------------------------------------------------------------------------
# Two-stage feature extractor (presumably GoogLeNet-based, given --lr_GoogLeNet),
# trained here from its default initialisation.
featureExtractor_1 = FeatureExtractor_1()
featureExtractor_1.cuda()
featureExtractor_2 = FeatureExtractor_2()
featureExtractor_2.cuda()

# Hashing head producing codes of length --hash_length (default 48 bits).
hashNet = HashNet(code_length=args.hash_length)
hashNet.cuda()
# Alternative: resume all three networks from earlier checkpoints (kept for reference).
# featureExtractor_1_weights_path = '../mirror_shangming_Retrieval_3/ckpt_E_500_mAP_0.89544_featureExtractor_1.t7'
# featureExtractor_1 = FeatureExtractor_1(featureExtractor_1_weights_path)
# featureExtractor_1.cuda()
# featureExtractor_2_weights_path = '../mirror_shangming_Retrieval_3/ckpt_E_500_mAP_0.89544_featureExtractor_2.t7'
# featureExtractor_2 = FeatureExtractor_2(featureExtractor_2_weights_path)
# featureExtractor_2.cuda()

# hashNet_weights_path = '../mirror_shangming_Retrieval_3/ckpt_E_500_mAP_0.89544_hashNet.t7'
# hashNet = HashNet()
# hashNet.load_state_dict(torch.load(hashNet_weights_path))
# hashNet.cuda()

# ---------------------------------------------------------------------------------------------------------
def build_param_groups(model, base_lr, weight_decay):
    """Build per-parameter SGD groups for `model`.

    Follows the classic Caffe convention: bias parameters get twice the base
    learning rate and no weight decay; every other parameter uses the base
    learning rate with the given weight decay.

    Args:
        model: module whose named_parameters() are grouped.
        base_lr: learning rate for non-bias parameters.
        weight_decay: L2 penalty for non-bias parameters.

    Returns:
        A list of param-group dicts suitable for torch.optim.SGD.
    """
    groups = []
    for name, param in model.named_parameters():
        if 'bias' in name:
            groups.append({'params': [param], 'lr': 2 * base_lr, 'weight_decay': 0})
        else:
            groups.append({'params': [param], 'lr': base_lr, 'weight_decay': weight_decay})
    return groups

# One shared helper replaces the three near-identical copy-pasted loops that
# previously built these groups; the resulting groups are identical.
featureExtractor_1_paras_new = build_param_groups(featureExtractor_1, args.lr_GoogLeNet, args.weight_decay)
featureExtractor_2_paras_new = build_param_groups(featureExtractor_2, args.lr_rest, args.weight_decay)
hashNet_paras_new = build_param_groups(hashNet, args.lr_rest, args.weight_decay)

optimizer_image = optim.SGD(
    featureExtractor_1_paras_new + featureExtractor_2_paras_new + hashNet_paras_new,
    momentum=0.9)
# lr_scheduler_image = LrScheduler(optimizer_image, args.lr_GoogLeNet, args.lr_rest, args.lr_decay_ratio, args.epoch_step)

# ---------------------------------------------------------------------------------------------------------


# Bookkeeping counters. Only `start_epoch` is read below; the remaining
# counters are leftovers from an earlier version of this script.
start_epoch = 0
total_tst_time = 0
test_cnt = 0
loss_print = 0
MODEL_UPDATE_ITER = 0

# epoch = 0
# pre_train_hash(epoch)

# train 1
# NOTE(review): the upper bound is max_epochs+1, so this runs max_epochs + 1
# epochs (0..500 inclusive) — presumably so the final epoch also hits the
# `% 10 == 0` evaluation below; confirm this is intended.
for epoch in range(start_epoch, start_epoch+args.max_epochs+1):

    # lr_scheduler_image.adjust_learning_rate(epoch)
    pre_train_hash(epoch)
    # Evaluate retrieval mAP and save checkpoints every 10 epochs.
    if epoch % 10 == 0:
        _ = test_hash(epoch)







































# import torch
# import torchvision.models as models
# import torch.optim as optim
# import torchvision.transforms as transforms
# from torch.autograd import Variable
# import torch.nn.functional as F
# import torch.utils.data as torchdata
# import torch.utils.data as Data
# from collections import namedtuple
# import os
# from PIL import Image, ImageDraw, ImageFont
# from Retrieval_tools import *
# from Retrieval_utils import *
# os.environ["CUDA_VISIBLE_DEVICES"] = "3"

# import argparse

# # -----------------------------------------------------------------------------------


# # define the Pytorch Tensor
# use_cuda = torch.cuda.is_available()
# FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
# LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
# ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
# Tensor = FloatTensor

# class MyDataset_visual(torchdata.Dataset):
#     def __init__(self, txt='../dataset/VOCdevkit/onehot_trainval_2007',
#                  transform=None, target_transform=None, loader=default_loader):
#         self.transform = transform
#         self.target_transform = target_transform
#         self.loader = loader
#         file = torch.load(txt)
#         imgs = []
#         # for i in range(len(file)):
#         for i in range(500):
#             line = file[i]
#             imgpath = line.strip().split()[0]
#             label = line.strip().split()[1:]
#             label = list(map(int, label))
#             words = [imgpath, label]
#             imgs.append(words)
#         self.imgs = imgs

#     def __getitem__(self, index):
#         words = self.imgs[index]
#         img = self.loader('../dataset/VOCdevkit/VOC2007/JPEGImages/'+words[0]+'.jpg')
#         if self.transform is not None:
#             img = self.transform(img)
#         label = torch.LongTensor(words[1])
#         # return img, label
#         # ----------- 以下为修改部分 -------------------------------------------------------
#         # tem = []
#         # tem.append(int(words[0]))
#         # img_name = torch.Tensor(tem).int()
#         # print(img_name)
#         # return img_name, img, label
#         return words[0], img, label
#         # ---------- end ------------------------------------------------------------------

#     def __len__(self):
#         return len(self.imgs)


# def init_voc_dataloader_visual():

#     mean = (0.4914, 0.4822, 0.4465)
#     std = (0.2023, 0.1994, 0.2010)
#     transform_train = transforms.Compose([
#         # transforms.Resize((256, 256)),
#         transforms.Resize((448, 448)),
#         # transforms.RandomCrop((224, 224)),
#         # transforms.RandomHorizontalFlip(),
#         transforms.ToTensor(),
#         transforms.Normalize(mean=mean, std=std)
#     ])
#     transform_test = transforms.Compose([
#         transforms.Resize((448, 448)),
#         # transforms.CenterCrop((224, 224)),
#         transforms.ToTensor(),
#         transforms.Normalize(mean=mean, std=std)
#     ])

#     test_dir = '../dataset/VOCdevkit/onehot_test_2007'
#     trainval_dir = '../dataset/VOCdevkit/onehot_trainval_2007'
#     batch_size = 15

#     test_set = MyDataset_visual(txt=test_dir, transform=transform_test)
#     test_loader = torchdata.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4)

#     # trainval_set = MyDataset_visual(txt=trainval_dir, transform=transform_train)
#     # trainval_loader = torchdata.DataLoader(trainval_set, batch_size=batch_size, shuffle=False, num_workers=4)

#     # return trainval_loader
#     return test_loader
#     # return trainval_loader, test_loader


# # ***********____________________***********************____________________
# path_voc = "../dataset/VOCdevkit/VOC2007/JPEGImages/"

# path_font = "/usr/share/fonts/liberation/LiberationMono-Regular.ttf"
# path_testing_folder = "../test_testing/"
# font = ImageFont.truetype(path_font, 24)
# # ***********____________________***********************____________________
# def draw_sequences_test(step, action, draw, region_image, background, path_testing_folder,
#                         region_mask, image_name):
#     aux = np.asarray(region_image, np.uint8)
#     img_offset = (1000 * step, 70)
#     footnote_offset = (1000 * step, 550)
#     q_predictions_offset = (1000 * step, 500)
#     mask_img_offset = (1000 * step, 700)
#     img_for_paste = Image.fromarray(aux)
#     background.paste(img_for_paste, img_offset)
#     mask_img = Image.fromarray(255 * region_mask)
#     background.paste(mask_img, mask_img_offset)
#     footnote = 'action: ' + string_for_action(action)
#     # iou = 'iou = ' + str(iou)
#     # q_val_predictions_text = str(qval)
#     draw.text(footnote_offset, footnote, (0, 0, 0), font=font)
#     # draw.text(q_predictions_offset, q_val_predictions_text, (0, 0, 0), font=font)
#     # draw.text(q_predictions_offset, iou, (0, 0, 0), font=font)
#     file_name = path_testing_folder + image_name + '.png'
#     # if save_boolean == 1:
#     background.save(file_name)
#     return background


# # 得到了 经过预处理(resize、归一化等操作)的训练集和测试集
# print('load data !')
# trainval_loader = init_voc_dataloader_visual()

# featureExtractor_1_weights_path = '../shangming_Retrieval_5/ckpt_E_480_mAP_0.87413_featureExtractor_1.t7'
# featureExtractor_1 = FeatureExtractor_1(featureExtractor_1_weights_path)
# featureExtractor_1.cuda()
# featureExtractor_2_weights_path = '../shangming_Retrieval_5/ckpt_E_480_mAP_0.87413_featureExtractor_2.t7'
# featureExtractor_2 = FeatureExtractor_2(featureExtractor_2_weights_path)
# featureExtractor_2.cuda()

# hashNet_weights_path = '../shangming_Retrieval_5/ckpt_E_480_mAP_0.87413_hashNet.t7'
# hashNet = HashNet()
# hashNet.load_state_dict(torch.load(hashNet_weights_path))
# hashNet.cuda()

# featureExtractor_1.eval()
# featureExtractor_2.eval()
# hashNet.eval()


# for batch_idx, (image_names, images, labels) in enumerate(trainval_loader):

#     with torch.no_grad():
#         images, labels = Variable(images).cuda(), Variable(labels).cuda()

#     imageFeatures_1 = featureExtractor_1.forward(images)  # torch.Size([16, 1024, 14, 14])
#     imageFeatures = featureExtractor_2.forward(imageFeatures_1)  # torch.Size([16, 480, 14, 14])
#     mask = hashNet.forward_mask(imageFeatures)



#     for i in range(images.shape[0]):
#         image_name = image_names[i]
#         image_path = path_voc + image_name + ".jpg"
#         image_paint = np.array(Image.open(image_path))
#         image_size = image_paint.shape
#         # print('image_size = ', image_size)


#         background = Image.new('RGBA', (1200, 600), (255, 255, 255, 255))
#         draw = ImageDraw.Draw(background)


#         img_offset = (10, 10)
#         aux = np.asarray(image_paint, np.uint8)
#         # print('aux.type = ', type(aux))
#         img_for_paste = Image.fromarray(aux)
#         background.paste(img_for_paste, img_offset)


#         region_mask = np.array(mask[i][0].detach().cpu(), np.uint8)
#         # print('region_mask.type = ', type(region_mask))
#         # print('region_mask = ', region_mask)
#         mask_img_offset = (600, 10)
#         mask_img = Image.fromarray(region_mask)

#         transform = transforms.Compose([
#             transforms.Resize((image_size[0], image_size[1]))])
#         mask_img = transform(mask_img)
#         # print('mask_img = ', np.array(mask_img))


#         mask_img = np.array(mask_img)
#         # --------------------------------
#         img_after_mask_np = np.zeros([aux.shape[0], aux.shape[1], aux.shape[2]])
#         img_after_mask_np[:, :, 0] = aux[:, :, 0] * mask_img
#         img_after_mask_np[:, :, 1] = aux[:, :, 1] * mask_img
#         img_after_mask_np[:, :, 2] = aux[:, :, 2] * mask_img
#         img_after_mask_np = np.asarray(img_after_mask_np, np.uint8)
#         # --------------------------------
#         img_after_mask_Image = Image.fromarray(img_after_mask_np)
#         background.paste(img_after_mask_Image, mask_img_offset)

#         # background.paste(255*mask_img, mask_img_offset)

#         file_name = path_testing_folder + image_name + '.png'
#         background.save(file_name)












