import os
import time
import argparse
import numpy as np
import tool
from loss import TripleLoss, TripleLoss1, TripleLoss2, TripleLoss3

import nets
import torch
import cvusa as dataset
from torch.utils.data import DataLoader


parser = argparse.ArgumentParser(description='cosplace in Pytorch implementation.')

# environment: data-loader workers, device selection, and project/dataset paths
parser.add_argument("--num_worker",           type=int,  default=8, help="_")
parser.add_argument('--cuda',                 type=tool.str2bool, help='cuda', default=True)
parser.add_argument('--project_data_dir',     type=str,  help='project data save dir',
                    default='/home/a409/users/lihaowei/my_model/cosplace/')
parser.add_argument('--dataset_dir',          type=str, help='dataset dir',
                    default='/home/a409/users/lihaowei/data/CVUSA_DSM/')

# model: descriptor dimensionality, backbone choice, ground-view field of view,
# and whether to use a reduced CVUSA split
parser.add_argument('--dimension',   type=int, default=512, help='out channel ')
parser.add_argument("--backbone",    type=str, default="resnet18",
                    choices=["vgg16", "resnet18", "resnet50", "resnet101", "resnet152", "vig"], help="_")
parser.add_argument('--angle',       type=int,   help='angle of ground image', default=180,
                    choices=[70, 120, 180, 360])
parser.add_argument('--tiny_split',  type=tool.str2bool, help='whether to use the whole CVUSA set', default=True)

# training: run mode (step-1 train/test or step-2 train/test), resume options,
# and the main optimization hyper-parameters
parser.add_argument('--mode', type=str, help='run to train or test', default='step2_train',
                    choices=['train', 'test', 'step2_train', 'step2_test'])
parser.add_argument('--train_from_begin',  type=tool.str2bool,  help='whether to train from begin', default=True)
parser.add_argument('--start_epoch',       type=int,   help='train from epoch',                   default=0)
parser.add_argument('--batch_size',        type=int,   help='number of batch size',               default=256)
parser.add_argument('--loss_weight',       type=float, help='loss_weight',                        default=10.0)
parser.add_argument('--number_of_epoch',   type=int,   help='number of epoch',                    default=100)
parser.add_argument('--learning_rate_val', type=float, help='learning rate val',                  default=1e-5)


# Parsed once at import time; `args` and `device` are module-level globals
# read by every function below.
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")


def validate_val(grd_descriptor, sat_descriptor, topk):
    """Return the fraction of queries whose true match ranks within `topk`.

    Descriptors are assumed L2-normalised, so ``2 - 2 * <a, b>`` is the
    squared Euclidean distance; the i-th row/column of the distance matrix
    corresponds to the ground-truth pair, i.e. the diagonal entries are the
    positive distances.
    """
    dist_array = 2 - 2 * np.matmul(sat_descriptor, np.transpose(grd_descriptor))
    total = dist_array.shape[0]
    # a query is a hit when fewer than `topk` candidates beat its positive
    hits = sum(
        1.0
        for col in range(total)
        if np.sum(dist_array[:, col] < dist_array[col, col]) < topk
    )
    return hits / float(total)


def compute_loss2(ori, option_ori, cor_angle, cor_index):
    """Soft-margin loss over candidate (location, orientation) cells.

    `ori` is the ground-view code and `option_ori` stacks one score map per
    candidate satellite crop.  The positive cell is (cor_index, orientation
    bin nearest to cor_angle).  The result combines a full location term with
    a discounted same-location orientation term.
    """
    weight = 20
    # orientation-bin centres in degrees, then the ground-truth cell indices
    bin_angles = np.arange(0, 360, 360.0 / option_ori.shape[-1])
    y_pos = tool.getrightlori(bin_angles, cor_angle)
    x_pos = int(cor_index)

    scores = torch.einsum("j,ijk->ik", [ori, option_ori])
    n_loc, n_ang = scores.shape
    positive = scores[x_pos][y_pos]

    # same location, every orientation bin
    angle_diff = positive - scores[x_pos, :]
    angle_term = torch.sum(torch.log(1 + torch.exp(angle_diff * weight))) / n_ang

    # every (location, orientation) cell
    loc_diff = positive - scores
    loc_term = torch.sum(torch.log(1 + torch.exp(loc_diff * weight))) / n_ang / n_loc

    return 1.2 * loc_term - 0.2 * angle_term


def compute_loss1(sat_global, grd_global):
    """Bidirectional soft-margin triplet loss on a batch of descriptors.

    `sat_global` and `grd_global` are (B, D) descriptor batches whose i-th
    rows form the matching pair, so the diagonal of the distance matrix holds
    the positive distances.

    Returns the mean of the ground-to-satellite and satellite-to-ground
    soft-margin losses, scaled by `args.loss_weight`.
    """
    dist_array = 2 - 2 * torch.matmul(sat_global, grd_global.t())
    pos_dist = torch.diag(dist_array)
    # fix: normalize by the actual batch size rather than args.batch_size so
    # the final (possibly smaller) batch of an epoch is scaled correctly
    batch = sat_global.shape[0]
    pair_n = batch * (batch - 1.0)

    # ground to satellite
    triplet_dist_g2s = pos_dist - dist_array
    loss_g2s = torch.sum(torch.log(1 + torch.exp(triplet_dist_g2s * args.loss_weight))) / pair_n

    # satellite to ground
    triplet_dist_s2g = torch.unsqueeze(pos_dist, 1) - dist_array
    loss_s2g = torch.sum(torch.log(1 + torch.exp(triplet_dist_s2g * args.loss_weight))) / pair_n

    return (loss_g2s + loss_s2g) / 2.0


def train_step1(epoch, model, train_loader, val_loader, project_name, val_size, train_size):
    """Run one step-1 (global descriptor) training epoch.

    Trains `model` on `train_loader`, appends the epoch loss to
    <project_data_dir>/<project_name>/Result/loss.txt, checkpoints the model
    as <epoch>.pth, and runs validation.

    Args:
        epoch: current epoch index (logging and checkpoint name).
        model: network returning (sat_code, grd_code) for an image pair.
        train_loader / val_loader: loaders yielding (sat_img, grd_img, index, angle).
        project_name: sub-directory for results and checkpoints.
        val_size / train_size: dataset sizes, used only for progress printing.
    """
    epoch_loss = 0
    model.train(mode=True)
    # NOTE(review): the optimizer is re-created every epoch, resetting Adam's
    # moment estimates each time — confirm this is intentional.
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate_val,
                                 betas=(0.9, 0.999), eps=1e-8)
    t1 = time.time()
    for iteration, (sat_img, grd_img, index, angle) in enumerate(train_loader):
        sat_img = sat_img.to(device)
        grd_img = grd_img.to(device)

        sat_code, grd_code = model(sat_img, grd_img)
        optimizer.zero_grad()
        loss = compute_loss1(sat_code, grd_code)
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()
        print('Epoch: [%3d][%5d/%d]' % (epoch, iteration*args.batch_size, train_size))
    t2 = time.time()
    print('this epoch used time is %f s' % (t2 - t1))

    loss_writer_path = args.project_data_dir + project_name + '/Result/'
    os.makedirs(os.path.dirname(loss_writer_path), exist_ok=True)
    # fix: use a context manager so the log file is flushed and closed
    # (the original left the handle open)
    with open(loss_writer_path + 'loss.txt', 'a') as file:
        file.write(str(epoch_loss / args.batch_size) + '\n')
    print('epoch %d, average batch loss is : %f' % (epoch, epoch_loss / args.batch_size))

    save_path = args.project_data_dir + project_name + '/' + str(epoch) + '.pth'
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    torch.save(model.state_dict(), save_path)
    print('model have been saved at :', save_path)

    # validate every epoch (epoch % 1 is always 0; kept for easy retuning)
    if epoch % 1 == 0 or epoch + 1 == args.number_of_epoch:
        val(model, val_loader, val_size, project_name)


def val(model, test_data_loader, test_size, project_name):
    """Compute global descriptors on the validation set and report top-k recall.

    Appends the accuracy list to
    <project_data_dir>/<project_name>/Result/acc.txt.
    """
    print('   valing...')
    print('   compute global descriptors...')
    model.eval()
    with torch.no_grad():
        sat_global_descriptor = torch.zeros([test_size, args.dimension])
        grd_global_descriptor = torch.zeros([test_size, args.dimension])

        val_i = 0
        for iteration, (sat_img, grd_img, index, angle) in enumerate(test_data_loader):
            sat_img = sat_img.to(device)
            grd_img = grd_img.to(device)

            sat_coding, grd_coding = model(sat_img, grd_img)

            # val_i tracks the true write offset, so the final partial batch
            # is placed correctly even though the slice end over-shoots
            sat_global_descriptor[val_i: val_i + args.batch_size, :] = sat_coding
            grd_global_descriptor[val_i: val_i + args.batch_size, :] = grd_coding

            val_i += sat_coding.shape[0]
            print('Progress of global descriptors: [%5d/%d]' % (iteration * args.batch_size, test_size))

        sat_global_descriptor = sat_global_descriptor.detach().cpu().numpy()
        grd_global_descriptor = grd_global_descriptor.detach().cpu().numpy()
        top1_percent = int(test_size*0.01)
        # NOTE(review): descriptors are passed (sat, grd) here but (grd, sat)
        # in test() — confirm the intended retrieval direction.
        top_list = [50, 80, 100, 150]
        acc_list = [100 * validate_val(sat_global_descriptor, grd_global_descriptor, k)
                    for k in top_list]
        print('Result', ':')
        # fix: the original labels printed 'top1/top5/top10' for what are
        # actually the top-50/80/100 accuracies; label with the real k values
        for k, acc in zip(top_list[:-1], acc_list[:-1]):
            print('     top%d' % k, ':', acc)
        print('     top1%%(top%d):' % top1_percent, acc_list[3])
        acc_writer_path = args.project_data_dir + project_name + '/Result/'
        os.makedirs(os.path.dirname(acc_writer_path), exist_ok=True)
        # fix: close the accuracy log (the original handle was never closed)
        with open(acc_writer_path + 'acc.txt', 'a') as file:
            file.write(str(acc_list) + '\n')


def test(model, test_data_loader, test_size):
    """Evaluate a step-1 checkpoint and build the step-2 candidate splits.

    Computes global descriptors for every pair, reports the top-1.5% recall,
    then writes the part2 split files (angles, paths, query indices, and the
    top-(k+1) satellite candidates per query) under
    <dataset_dir>/part2_splits/.
    """
    print('   valing...')
    print('   compute global descriptors...')
    model.eval()
    with torch.no_grad():
        sat_global_descriptor = torch.zeros([test_size, args.dimension])
        grd_global_descriptor = torch.zeros([test_size, args.dimension])

        val_i = 0
        path_list = []
        angle_list = []
        for iteration, (sat_img, grd_img, index, angle) in enumerate(test_data_loader):
            path_list.extend(index)
            angle_list.extend(angle)
            sat_img = sat_img.to(device)
            grd_img = grd_img.to(device)

            sat_coding, grd_coding = model(sat_img, grd_img)

            # val_i tracks the true write offset for partial final batches
            sat_global_descriptor[val_i: val_i + args.batch_size, :] = sat_coding
            grd_global_descriptor[val_i: val_i + args.batch_size, :] = grd_coding

            val_i += sat_coding.shape[0]
            print('Progress of global descriptors: [%5d/%d]' % (iteration * args.batch_size, test_size))

        sat_global_descriptor = sat_global_descriptor.detach().cpu().numpy()
        grd_global_descriptor = grd_global_descriptor.detach().cpu().numpy()

        topk = int(0.015 * test_size)
        acc = validate_val(grd_global_descriptor, sat_global_descriptor, topk)
        print(topk)
        print(acc)
        print("start to build step2 splits")
        dist_array = 2 - 2 * np.matmul(sat_global_descriptor, np.transpose(grd_global_descriptor))
        part2_save_path = args.dataset_dir + 'part2_splits/'
        os.makedirs(os.path.dirname(part2_save_path), exist_ok=True)
        # fix: use context managers so every split file is flushed and closed
        # (the original opened four handles and closed none)
        with open(part2_save_path + 'part2_angle.txt', 'a') as angle_file:
            angle_file.write(str(angle_list) + '\n')
        with open(part2_save_path + 'part2_path.txt', 'a') as path_file:
            path_file.write(str(path_list) + '\n')

        with open(part2_save_path + 'part2_index.txt', 'a') as file1, \
                open(part2_save_path + 'part2_index_sat.txt', 'a') as file2:
            for i in range(dist_array.shape[0]):
                gt_dist = dist_array[i, i]
                prediction = np.sum(dist_array[:, i] < gt_dist)
                if prediction < topk + 1:
                    file1.write(str(i) + '\n')
                    # topk + 1 because a wrong location can have exactly the
                    # same distance as the diagonal (ground-truth) entry
                    temp = np.argsort(dist_array[:, i])[:topk+1].tolist()
                    file2.write(str(temp) + '\n')

        print('Done!')


def main(args):
    """Dispatch to the run mode selected on the command line.

    Modes:
      'train'       -- step-1 global-descriptor training on CVUSA part1,
                       validating on part2 each epoch.
      'test'        -- evaluate a step-1 checkpoint and write the part2
                       split files used by step 2.
      'step2_train' -- train the OriPlace refinement model on the step-2 set.
      'step2_test'  -- evaluate a step-2 checkpoint (location and
                       orientation accuracy).
    """
    print(args)
    if args.mode == 'train':
        # import data
        print('===> Loading dataset(s)')
        whole_train_set = dataset.get_whole_training_set(mode='train_part1', angle=args.angle)
        training_data_loader = DataLoader(dataset=whole_train_set,
                                          num_workers=args.num_worker, batch_size=args.batch_size, shuffle=True,
                                          pin_memory=True)
        whole_val_set = dataset.get_whole_val_set(mode='train_part2', angle=args.angle)
        val_data_loader = DataLoader(dataset=whole_val_set,
                                     num_workers=args.num_worker, batch_size=args.batch_size, shuffle=True,
                                     pin_memory=True)
        val_size = whole_val_set.data_size
        train_size = whole_train_set.data_size
        print('Done!  total %d pairs of images to train ;' % whole_train_set.data_size)
        print('       total %d pairs of images to val ;' % whole_val_set.data_size)

        # # create model
        # print('===> creating model..')
        # model = nets.CosPlace(args.backbone, args.dimension)
        # model = model.to(device)
        # print('Done! ')
        # NOTE(review): resume checkpoint path is hard-coded — consider
        # exposing it as a CLI flag.
        load_path = args.project_data_dir + '5_5_16_11/50.pth'
        print('===> Loading trained model from:', load_path)
        model = nets.CosPlace(args.backbone, args.dimension)
        model.load_state_dict(torch.load(load_path))
        model = model.to(device)
        print('Done!')

        print('start training')
        t = time.localtime()
        print('start time is : %d,%d,%d:%d:%d' % (t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec))
        # results/checkpoints are grouped under a timestamped directory name
        project_name = str(t.tm_mon) + '_' + str(t.tm_mday) + '_' + str(t.tm_hour) + '_' + str(t.tm_min)
        for train_epoch in range(args.start_epoch, args.number_of_epoch):
            train_step1(train_epoch, model, training_data_loader, val_data_loader, project_name, val_size, train_size)

    if args.mode == 'test':
        print('===> Loading dataset(s)')
        whole_test_set = dataset.get_whole_test_set(mode='train_part2', angle=args.angle)
        test_data_loader = DataLoader(dataset=whole_test_set,
                                      num_workers=args.num_worker, batch_size=args.batch_size, shuffle=True,
                                      pin_memory=True)
        test_size = whole_test_set.data_size
        print('Done!  total %d pairs of images to test ;' % test_size)
        # loading model
        print('===> loadinging model..')
        # NOTE(review): evaluation checkpoint path is hard-coded.
        load_path = args.project_data_dir + '5_6_17_0/5.pth'
        print('===> Loading trained model from:', load_path)
        model = nets.CosPlace(args.backbone, args.dimension)
        model.load_state_dict(torch.load(load_path))
        model = model.to(device)
        print('Done!')

        print('start testing')
        t = time.localtime()
        print('start time is : %d,%d,%d:%d:%d' % (t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec))
        test(model, test_data_loader, test_size)

    if args.mode == "step2_train":
        print('===> Loading dataset(s)')
        # batch_size=1 and shuffle=False: each item already bundles one
        # ground image with its candidate satellite crops, and train_step2
        # relies on a fixed order to split train/val by iteration index
        whole_train_set = dataset.get_train2_set(mode='train', angle=args.angle)
        train_data_loader = DataLoader(dataset=whole_train_set,
                                       num_workers=args.num_worker, batch_size=1, shuffle=False,
                                       pin_memory=True)
        data_size = len(whole_train_set)
        print('Done!  total %d pairs of images to train ;' % int(0.9 * data_size))
        print('       total %d pairs of images to val;' % int(0.1 * data_size))

        model = nets.OriPlace(args.backbone)
        model = model.to(device)

        # load_path = args.project_data_dir + '5_16_16_46/3.pth'
        # print('===> Loading trained model from:', load_path)
        # model = nets.OriPlace(args.backbone)
        # model.load_state_dict(torch.load(load_path))
        # model = model.to(device)
        print('Done!')

        print('start training')
        t = time.localtime()
        print('start time is : %d,%d,%d:%d:%d' % (t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec))
        project_name = str(t.tm_mon) + '_' + str(t.tm_mday) + '_' + str(t.tm_hour) + '_' + str(t.tm_min)
        # first 90% of items train, remaining 10% validate (see train_step2)
        for train_epoch in range(args.start_epoch, args.number_of_epoch):
            train_step2(train_epoch, model, train_data_loader, project_name, int(0.9 * data_size), int(0.1 * data_size))

    if args.mode == "step2_test":
        print('===> Loading dataset(s)')
        whole_train_set = dataset.get_train2_set(mode='train', angle=args.angle)
        train_data_loader = DataLoader(dataset=whole_train_set,
                                       num_workers=args.num_worker, batch_size=1, shuffle=False,
                                       pin_memory=True)
        data_size = len(whole_train_set)
        print('Done!  total %d pairs of images to test ;' % int(data_size))

        # loading model
        print('===> loadinging model..')
        # NOTE(review): step-2 checkpoint path is hard-coded.
        load_path = args.project_data_dir + '5_16_16_46/60.pth'
        print('===> Loading trained model from:', load_path)
        model = nets.OriPlace(args.backbone)
        model.load_state_dict(torch.load(load_path))
        model = model.to(device)
        print('Done!')

        # # create model
        # print('===> creating model..')
        # model = nets.OriPlace(args.backbone)
        # model = model.to(device)
        # print('Done! ')

        epoch = 0
        acc_ = 0   # correct location predictions
        acc__ = 0  # correct location AND orientation predictions
        model.eval()
        for iteration, (sat_img, grd_img, cor_angle, cor_index) in enumerate(train_data_loader):
            # NOTE(review): 3161 looks like the size of the evaluation
            # subset — confirm against the dataset split.
            if iteration < 3161:
                # drop the batch dimension: sat_img stacks the candidate crops
                sat_img = torch.squeeze(sat_img, 0)

                # grd_img = grd_img.repeat(sat_img.shape[0], 1, 1, 1) # nead??

                sat_img = sat_img.to(device)
                grd_img = grd_img.to(device)

                ori, option_ori = model(sat_img, grd_img)

                # orientation-bin centres in degrees and the ground-truth cell
                angle_list = np.arange(0, 360, 360.0 / option_ori.shape[-1])
                cor_y = tool.getrightlori(angle_list, cor_angle)
                cor_x = cor_index

                # squared-distance map over (candidate location, orientation bin)
                distance_mac = 2 - 2 * torch.einsum("j,ijk->ik", [ori, option_ori])


                # best (minimum-distance) orientation per row, then the best
                # row overall (original comment said "max" but the code
                # takes the minimum)
                temp1 = torch.min(distance_mac, dim=1)
                temp2 = torch.min(temp1[0], dim=0)
                pre_y = temp1[1][temp2[1]]
                # print(temp2[1], pre_y)

                if temp2[1].item() == cor_x:
                    acc_ += 1
                    if pre_y == cor_y:
                        acc__ += 1

                # print(distance_mac)
                # print(torch.min(distance_mac))
                # print(distance_mac[cor_x.item()][cor_y])
                # exit()
            # if iteration > 100 :
            #         break
            # if iteration == 200:
            #     break
            print('[%5d/%d]' % (iteration, data_size))

        print(acc_)
        print(acc__)


def train_step2(epoch, model, train_data_loader, project_name, train_size, val_size):
    """Run one step-2 (orientation/location refinement) epoch.

    The loader is iterated once: the first `train_size` items are used for
    training and the remainder for validation, so the loader must not
    shuffle.  Appends accuracy and loss logs under
    <project_data_dir>/<project_name>/Result/ and checkpoints the model.

    Args:
        epoch: current epoch index.
        model: OriPlace-style network returning (ori, option_ori).
        train_data_loader: batch_size-1 loader of (sat_img, grd_img, cor_angle, cor_index).
        project_name: sub-directory for results and checkpoints.
        train_size / val_size: sizes of the train/val portions of the loader.
    """
    epoch_loss = 0
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate_val,
                                 betas=(0.9, 0.999), eps=1e-8)

    acc = 0       # correct location predictions on the validation tail
    acc_ori = 0   # correct location AND orientation predictions

    loss_ = TripleLoss3(10)

    for iteration, (sat_img, grd_img, cor_angle, cor_index) in enumerate(train_data_loader):
        if iteration < train_size:
            model.train()

            # drop the batch dimension: sat_img stacks the candidate crops
            sat_img = torch.squeeze(sat_img, 0)
            sat_img = sat_img.to(device)
            grd_img = grd_img.to(device)

            ori, option_ori = model(sat_img, grd_img)
            optimizer.zero_grad()
            loss = loss_(ori, option_ori, cor_angle, cor_index)

            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()
            print('Epoch: [%3d][%5d/%d]' % (epoch, iteration, train_size))
        else:
            # NOTE(review): validation runs without torch.no_grad(), so
            # autograd state is still tracked here — consider wrapping.
            model.eval()
            sat_img = torch.squeeze(sat_img, 0)
            sat_img = sat_img.to(device)
            grd_img = grd_img.to(device)

            ori, option_ori = model(sat_img, grd_img)

            # orientation-bin centres in degrees and the ground-truth cell
            angle_list = np.arange(0, 360, 360.0 / option_ori.shape[-1])
            cor_y = tool.getrightlori(angle_list, cor_angle)
            cor_x = cor_index

            distance_mac = torch.einsum("j,ijk->ik", [ori, option_ori])

            # best (minimum) score per candidate location, then best location
            temp1 = torch.min(distance_mac, dim=1)
            temp2 = torch.min(temp1[0], dim=0)
            pre_y = temp1[1][temp2[1]]
            if temp2[1].item() == cor_x:
                acc += 1
                if pre_y == cor_y:
                    acc_ori += 1
            print('Epoch: [%3d][%5d/%d]' % (epoch, iteration - train_size, val_size))

    print(acc / val_size)
    print(acc_ori)
    acc_writer_path = args.project_data_dir + project_name + '/Result/'
    os.makedirs(os.path.dirname(acc_writer_path), exist_ok=True)
    # fix: close both log files deterministically (originals were never closed)
    with open(acc_writer_path + 'acc.txt', 'a') as file:
        file.write(str(acc) + ',' + str(acc_ori) + '\n')

    loss_writer_path = args.project_data_dir + project_name + '/Result/'
    os.makedirs(os.path.dirname(loss_writer_path), exist_ok=True)
    with open(loss_writer_path + 'loss.txt', 'a') as file:
        file.write(str(epoch_loss) + '\n')
    print('epoch %d, average batch loss is : %f' % (epoch, epoch_loss))

    save_path = args.project_data_dir + project_name + '/' + str(epoch) + '.pth'
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    torch.save(model.state_dict(), save_path)
    print('model have been saved at :', save_path)



if __name__ == '__main__':
    # Entry point: run the mode selected by --mode with the parsed CLI args.
    main(args)
