import torch
import torchvision.models as models
import torch.optim as optim
import torchvision.transforms as transforms
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.datasets as torchdata
import torch.utils.data as Data
from collections import namedtuple
import os
import copy
import numpy as np
from torch import autograd
from utils import *
from tools import *

# Example invocations:
# python baseline.py --lr 1e-3 --cv_dir baseline_1e-3  (results were very poor)
# python baseline.py --lr 1e-4 --cv_dir baseline_1e-4  tmux22
# python single_experiment.py --dataset CUB --num_shots 0 --generalized True
# Pin this process to GPU 2; must be set before CUDA is first initialized.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import argparse
parser = argparse.ArgumentParser(description='BlockDrop Training')
parser.add_argument('--lr', type=float, default=1e-7, help='learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--margin', type=float, default=10, help='margin of triplet loss')
parser.add_argument('--weight_decay', type=float, default=0.0005, help='weight decay')
# parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--batch_size', type=int, default=100, help='batch size')
parser.add_argument('--max_epochs', type=int, default=500, help='total epochs to run')
parser.add_argument('--cv_dir', default='baseline_1e-7_seed_alexnet')
# -----------------------------------------------------------------------------------
args = parser.parse_args()
# Pick CUDA or CPU tensor constructors once, based on device availability.
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor


# Fix RNG seeds for reproducibility.
seed = 1
torch.manual_seed(seed)  # CPU RNG
torch.cuda.manual_seed_all(seed)  # GPU RNGs — guarantees the same sequence of random numbers on every run



def train(epoch):
    """Run one training epoch over `train_loader`.

    Forwards each batch through `net`, maps the outputs to (-1, 1) with
    tanh to form relaxed hash codes, computes the triplet loss with the
    CLI margin, and takes one optimizer step per batch.

    Uses module-level globals: net, train_loader, optimizer, args, and
    the triplet_loss helper (presumably from utils/tools — TODO confirm).

    Args:
        epoch: current epoch index, used only for logging.
    """
    net.train()

    accum_loss = 0

    for batch_idx, (images, labels) in enumerate(train_loader):

        # Variable is deprecated since PyTorch 0.4; plain tensors carry
        # autograd state, so moving to GPU is all that's needed here.
        images, labels = images.cuda(), labels.cuda()

        # Call net(images) rather than net.forward(images) so that
        # nn.Module.__call__ runs registered hooks.
        hashcodes = net(images)
        hashcodes = torch.tanh(hashcodes)

        loss = triplet_loss(hashcodes, labels, args.margin)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # loss.item() replaces the deprecated loss.data.item() pattern.
        accum_loss += loss.item()

    # -------------
    print("epoch: %d, accum_loss: %.6f " % (epoch, accum_loss))


def compute_result_image_baseline(dataloader, net):
    """Compute binary hash codes for every image yielded by `dataloader`.

    Runs the whole forward pass under torch.no_grad() — the original code
    only wrapped the tensor-to-GPU conversion, so each evaluation batch
    still built an autograd graph and wasted GPU memory.

    Args:
        dataloader: yields (images, labels) batches.
        net: model mapping images to real-valued hash features.

    Returns:
        (codes, labels, total_time) where codes is the sign-binarized
        concatenation of all network outputs (on CPU), labels is the
        concatenation of all label tensors (on CPU), and total_time is
        the wall-clock seconds spent in the loop.
    """
    bs, clses = [], []

    time_start = time.time()
    with torch.no_grad():  # inference only: no graph, less memory
        for batch_idx, (images, labels) in enumerate(dataloader):

            clses.append(labels.cpu())

            # net(images) invokes __call__ (runs hooks) instead of
            # calling forward() directly.
            hashFeatures = net(images.cuda())

            bs.append(hashFeatures.cpu())
    total_time = time.time() - time_start

    return torch.sign(torch.cat(bs)), torch.cat(clses), total_time

def test(epoch):
    """Evaluate retrieval mAP at the given epoch and persist results.

    Computes binary codes for the test (query) and database sets, scores
    retrieval mAP, saves a model checkpoint named with the epoch and mAP,
    and appends the result line to a text log under result/.

    Uses module-level globals: net, test_loader, db_loader, args, and
    compute_mAP_MultiLabels (presumably from utils/tools — TODO confirm).

    Args:
        epoch: current epoch index, used for logging and checkpoint names.
    """
    net.eval()

    tst_binary, tst_label, tst_time = compute_result_image_baseline(test_loader, net)
    db_binary, db_label, db_time = compute_result_image_baseline(db_loader, net)
    # print('test_codes_time = %.6f, db_codes_time = %.6f'%(tst_time ,db_time))

    mAP = compute_mAP_MultiLabels(db_binary, tst_binary, db_label, tst_label)
    print("epoch: %d, retrieval mAP: %.6f" % (epoch, mAP))
    # logger.add_scalar('retrieval_mAP', mAP, epoch)

    torch.save(net.state_dict(), args.cv_dir+'/ckpt_E_%d_mAP_%.5f_net.t7'%(epoch, mAP))
    # Context manager guarantees the log file is closed even if write fails;
    # the original open()/close() pair leaked the handle on exception.
    with open('result/'+args.cv_dir+'_baseline_mAP.txt', 'a') as f:
        f.write('Epoch:'+str(epoch)+'  mAP = '+str(mAP)+'\n')


# --- run-state counters -----------------------------------------------------
start_epoch = 0
total_tst_time = 0
test_cnt = 0
loss_print = 0
MODEL_UPDATE_ITER = 0

# Build the VOC train / test(query) / database loaders from CLI args.
train_loader, test_loader, db_loader = init_voc_dataloader(args)
code_length = 32  # number of hash bits produced by the final layer
# --- initialization methods (reference notes) --------------------------------
# torch.nn.init.xavier_uniform(tensor, gain=1)  # samples from uniform U(-a, a)
# torch.nn.init.xavier_normal(tensor, gain=1)  # samples from Gaussian N(0, std)
# torch.nn.init.kaiming_uniform(tensor, a=0, mode='fan_in')  # samples from uniform U(-bound, bound)
# torch.nn.init.kaiming_normal(tensor, a=0, mode='fan_in')  # samples from Gaussian N(0, std)

# m.fc.bias.data.fill_(0)
# nn.init.xavier_uniform_(m.weight,gain=0.5)
# --------------------------------------------
# --- alexnet: replace the 1000-way classifier head with a hash layer ---------
net = models.alexnet(pretrained=True)
net.classifier[6] = nn.Linear(4096, code_length)
net.classifier[6].bias.data.fill_(0)
nn.init.xavier_uniform_(net.classifier[6].weight,gain=0.5)
net.cuda()
# -----------------------------------------------
# # --- resnet18 (alternative backbone, kept for reference) --------------------
# net = models.resnet18(pretrained=True)
# net.fc = nn.Linear(512, code_length)
# net.fc.bias.data.fill_(0)
# nn.init.xavier_uniform_(net.fc.weight,gain=0.5)
# net.cuda()
# # -----------------------------------------------

# nn.init.kaiming_normal_(net.classifier[6].weight)
# torch.nn.init.constant_(net.classifier[6].bias, 0)
# nn.init.xavier_uniform_(net.classifier[6].weight)
# optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# Optimizer is created AFTER net.cuda() so it holds the GPU parameters.
optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# optimizer = optim.Adam(list(net_1.parameters())+list(net_2.parameters()), lr=args.lr, weight_decay=args.weight_decay)

# label2vec_dict = torch.load('cifar10_file/embedding_weights_for_id2word').cuda()
# cos_weights = torch.load('cifar10_file/cos_weights').cuda()
# net = Net().cuda()
# optimizer_image = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

# epoch = 0
# train(epoch)
# net = train(epoch, net)
# test(epoch)

# Main training loop: one train() pass per epoch, retrieval evaluation
# (and checkpointing inside test()) every 10 epochs. Note the range runs
# max_epochs+1 iterations so the final epoch is also evaluated.
for epoch in range(start_epoch, start_epoch+args.max_epochs+1):

    # lr_scheduler_image.adjust_learning_rate(epoch)
    # net = train(epoch, net)
    train(epoch)
    if epoch % 10 == 0:
        test(epoch)


# f = open('../../../data2/zhuangyu/nuswide/result/nus_'+str(args.hash_len)+'bit_baseline_maml.txt', 'a') 
# f.write('Epoch:'+str(epoch)+' i2i_mAP='+str(i2i_mAP)+' t2t_mAP='+str(t2t_mAP)+' i2t_mAP='+str(i2t_mAP)+' t2i_mAP='+str(t2i_mAP)+' avg_mAP='+str(this_avg_map)+'\n')
# f.close()

# f = open('../../../data2/zhuangyu/nuswide/result/nus_'+str(args.hash_len)+'bit_baseline_maml.txt', 'a') 
# f.write('Epoch:'+str(epoch)+' i2i_mAP='+str(i2i_mAP)+' t2t_mAP='+str(t2t_mAP)+' i2t_mAP='+str(i2t_mAP)+' t2i_mAP='+str(t2i_mAP)+' avg_mAP='+str(this_avg_map)+'\n')
# f.close()