import os
import time
import argparse
import numpy as np
import tool
from loss import FineLoss, CoarseLoss, TripleLoss2, TripleLoss

import nets
import torch
import cvusa as dataset
from torch.utils.data import DataLoader


# ---------------------------------------------------------------------------
# Command-line configuration. Parsed once at import time; `args` and `device`
# are used as module-level globals by the functions below.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='cosplace in Pytorch implementation.')

# env parser
parser.add_argument("--num_worker",           type=int,  default=8, help="_")
parser.add_argument('--cuda',                 type=tool.str2bool, help='cuda', default=True)
parser.add_argument('--project_data_dir',     type=str,  help='project data save dir',
                    default='/home/a409/users/lihaowei/my_model/patch-DSM/')
parser.add_argument('--dataset_dir',          type=str, help='dataset dir',
                    default='/home/a409/users/lihaowei/data/CVUSA_DSM/')

# model parser
parser.add_argument('--dimension',   type=int, default=512, help='out channel ')
parser.add_argument("--backbone",    type=str, default="resnet18",
                    choices=["vgg16", "resnet18", "resnet50", "resnet101", "resnet152", "vig"], help="_")
parser.add_argument('--angle',       type=int,   help='angle of ground image', default=180,
                    choices=[70, 90, 180, 360])
parser.add_argument('--tiny_split',  type=tool.str2bool, help='whether to use the whole CVUSA set', default=True)

# train parser
parser.add_argument('--mode', type=str, help='run to train or test', default='train')
parser.add_argument('--train_from_begin',  type=tool.str2bool,  help='whether to train from begin', default=True)
parser.add_argument('--start_epoch',       type=int,   help='train from epoch',                   default=0)
parser.add_argument('--batch_size',        type=int,   help='number of batch size',               default=128)
parser.add_argument('--loss_weight',       type=float, help='loss_weight',                        default=100.0)
parser.add_argument('--number_of_epoch',   type=int,   help='number of epoch',                    default=1000)
parser.add_argument('--learning_rate_val', type=float, help='learning rate val',                  default=1e-3)


args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")


# Coarse feature-map widths per ground-view field of view, keyed by FoV in
# degrees: {fov: [satellite feature width, ground feature width]}. The feature
# height is fixed at 7 (see the buffers allocated in main()).
ANGLE_DIM = {
        360: [77, 39],
        180: [58, 20],
        90: [49, 10],
        70: [46, 8],
    }


def train(epoch, model, sat_feature, grd_feature, angle, project_name, sat_index, cor_index):
    """Run one epoch of fine-stage training and checkpoint the model.

    Args:
        epoch: current epoch index (used for logging and the checkpoint name).
        model: fine model (nets.PatchDSM); updated in place.
        sat_feature: cached satellite feature maps, shape (N, C, 7, W_sat),
            kept on CPU — batches are moved to `device` on demand.
        grd_feature: cached ground feature maps, shape (N, C, 7, W_grd).
        angle: per-sample orientation labels (sequence; converted to tensor).
        project_name: sub-directory under args.project_data_dir for logs and
            checkpoints.
        sat_index: per ground sample, the indices of its candidate satellite
            features produced by the coarse stage.
        cor_index: per ground sample, the position of the correct candidate.

    Side effects: appends the summed epoch loss to step2_loss.txt and saves a
    state-dict checkpoint named '<epoch>.pth'.
    """
    angle = torch.tensor(angle)
    cor_index = torch.tensor(cor_index)
    train_size = grd_feature.shape[0]
    loss_writer_path = args.project_data_dir + project_name + '/train/'
    os.makedirs(os.path.dirname(loss_writer_path), exist_ok=True)
    epoch_loss = 0
    model.train()
    # NOTE(review): the optimizer is re-created on every call, so Adam's
    # moment estimates reset each epoch — confirm this is intentional.
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate_val,
                                 betas=(0.9, 0.999), eps=1e-8)
    batch_size = 16  # TODO: hard-coded fine-stage batch size; move to args
    loss_fine = TripleLoss()
    t3 = time.time()

    # Shuffle sample indices and drop the tail so batches divide evenly.
    size = sat_feature.shape
    shuffle_index = torch.tensor(np.random.permutation(size[0]))
    remainder = shuffle_index.shape[0] % batch_size
    if remainder:
        shuffle_index = shuffle_index[:-remainder]
    shuffle_index = shuffle_index.reshape(-1, batch_size)
    num_batches = shuffle_index.shape[0]

    ii = 0
    for batch_idx in shuffle_index:
        grd_image = grd_feature[batch_idx].to(device)
        # Gather each sample's candidate satellite features and flatten the
        # (batch, candidates) axes into a single batch dimension.
        sat_image = sat_feature[sat_index[batch_idx]].reshape(-1, size[1], size[2], size[3]).to(device)

        ori, option_ori = model.forward(grd_image, sat_image)

        optimizer.zero_grad()
        loss = loss_fine(ori, option_ori, angle[batch_idx], cor_index[batch_idx])
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        ii += batch_size
        print('Epoch: [%3d][%5d/%d]' % (epoch, ii, train_size))
        del grd_image, sat_image
        torch.cuda.empty_cache()

    # Append the summed loss for this epoch; the context manager guarantees
    # the handle is closed (the original leaked it).
    with open(loss_writer_path + 'step2_loss.txt', 'a') as loss_file:
        loss_file.write(str(epoch_loss) + '\n')
    # Report the true per-batch average (the original printed the sum under
    # the "average" label).
    average_loss = epoch_loss / num_batches if num_batches else 0.0
    print('epoch %d, average batch loss is : %f' % (epoch, average_loss))
    t4 = time.time()
    print('step2 used time is %f s' % (t4 - t3))

    save_path = args.project_data_dir + project_name + '/' + str(epoch) + '.pth'
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    torch.save(model.state_dict(), save_path)
    print('model have been saved at :', save_path)


def test(model, sat_feature, grd_feature, angle, sat_index, cor_index):
    """Evaluate the fine model, printing top-1/5/10/1% accuracy.

    Args:
        model: fine model (nets.PatchDSM); switched to eval mode here.
        sat_feature / grd_feature: cached coarse-stage feature tensors.
        angle: per-sample ground-truth orientation labels.
        sat_index: candidate satellite indices per ground sample.
        cor_index: position of the correct candidate, or -1 when the correct
            satellite did not survive the coarse stage; such samples are
            skipped (they count as automatic misses, see note below).
    """
    print('   testing...')
    model.eval()
    test_size = sat_feature.shape[0]
    top1_percent = int(0.01 * test_size)
    top_list = [1, 5, 10, top1_percent]
    acc = np.zeros(4)   # hits counted by fine_val (candidate-only ranking)
    acc_ = np.zeros(4)  # hits counted by fine_val_se (candidate + orientation)
    for i in range(test_size):
        print(i, test_size)
        if cor_index[i] != -1:
            grd_image = grd_feature[i].to(device)
            grd_image = torch.unsqueeze(grd_image, 0)  # add batch dimension
            sat_image = sat_feature[sat_index[i]].to(device)

            with torch.no_grad():
                ori, option_ori = model.forward(grd_image, sat_image)
            acc += fine_val(ori, option_ori, angle[i], cor_index[i], top_list)
            acc_ += fine_val_se(ori, option_ori, angle[i], cor_index[i], top_list)
    print('Fine Result', ':')
    # NOTE(review): normalized by the full test_size even though samples with
    # cor_index == -1 never contribute hits — confirm that is the intent.
    print('     top1', ':', 100 * acc[0] / test_size)
    print('     top5', ':', 100 * acc[1] / test_size)
    print('     top10', ':', 100 * acc[2] / test_size)
    print('     top1.0%%(top%d):' % top1_percent, 100 * acc[3] / test_size)
    print(acc_)


def coarse_val(satcode, grdcode):
    """Print top-k recall for the coarse retrieval stage.

    Args:
        satcode: (N, D) satellite descriptors (CPU torch tensor).
        grdcode: (N, D) ground descriptors; row i of each forms a true pair.

    Ranks every satellite descriptor against each ground descriptor with the
    distance d = 2 - 2 * <s, g> and reports top-1/5/10/1.5% recall. The true
    match's 0-based rank is the number of strictly closer candidates, so a
    top-k hit is rank < k.
    """
    print('   valing...')
    sat_global_descriptor = satcode.detach().numpy()
    grd_global_descriptor = grdcode.detach().numpy()
    # int() so the threshold is a whole rank and the '%d' print below is
    # exact; this matches test()'s int(0.01 * test_size). The original kept
    # a float, which made the comparison and the printed value disagree.
    top1point5_percent = int(0.015 * satcode.shape[0])
    acc_list = []
    top_list = [1, 5, 10, top1point5_percent]
    dist_array = 2 - 2 * np.matmul(sat_global_descriptor, np.transpose(grd_global_descriptor))
    for k in top_list:
        accuracy = 0.0
        data_amount = 0.0

        for j in range(dist_array.shape[0]):
            gt_dist = dist_array[j, j]
            # 0-based rank of the true match among all satellite candidates.
            prediction = np.sum(dist_array[:, j] < gt_dist)
            if prediction < k:
                accuracy += 1.0
            data_amount += 1.0
        accuracy /= data_amount
        acc_list.append(accuracy)
    print('Coarse Result', ':')
    print('     top1', ':', 100 * acc_list[0])
    print('     top5', ':', 100 * acc_list[1])
    print('     top10', ':', 100 * acc_list[2])
    print('     top1.5%%(top%d):' % top1point5_percent, 100 * acc_list[3])


def fine_val_se(ori, option_ori, cor_angle, cor_index, top_list):
    """Joint top-k hit vector over (candidate, orientation) pairs.

    Unlike fine_val, a hit requires BOTH the correct satellite candidate and
    the correct orientation bin to rank inside top-k.

    Args:
        ori: ground descriptor, shape (D,) after squeeze.
        option_ori: candidate descriptors, shape (num_candidates, D, num_bins).
        cor_angle: ground-truth orientation angle in degrees.
        cor_index: index of the correct satellite candidate.
        top_list: the four k thresholds, e.g. [1, 5, 10, top1%].

    Returns:
        A length-4 0/1 numpy array marking which of the four top-k buckets
        the ground truth landed in (all zeros on a complete miss).
    """
    # Map the ground-truth angle onto its orientation-bin index.
    angle_list = np.arange(0, 360, 360.0 / option_ori.shape[-1])
    cor_y = tool.getrightlori_test(angle_list, cor_angle)
    cor_x = cor_index

    # d = 2 - 2 * <ori, option_ori[:, :, k]> for every candidate/bin cell.
    ori = torch.squeeze(ori)
    distance_mac = 2 - 2 * torch.einsum("j,ijk->ik", [ori, option_ori])
    size = distance_mac.shape

    acc = np.array([[1, 1, 1, 1],
                    [0, 1, 1, 1],
                    [0, 0, 1, 1],
                    [0, 0, 0, 1],
                    [0, 0, 0, 0]])
    # Rank all (candidate, orientation) cells jointly by distance.
    order = distance_mac.detach().reshape(-1).cpu().numpy().argsort()
    flat_target = size[1] * cor_x + cor_y  # row-major flat index of the GT cell

    rank = np.where(order == flat_target)[0]
    if rank.size == 0:  # defensive: target cell not found
        return acc[-1]

    for i in range(len(top_list)):
        # 0-based rank r is inside top-k iff r < k (the original used <=,
        # which admitted one extra rank into every bucket).
        if rank[0] < top_list[i]:
            return acc[i]
    return acc[-1]


def fine_val(ori, option_ori, cor_angle, cor_index, top_list):
    """Candidate-only top-k hit vector for fine-stage retrieval.

    Each candidate is scored by its best orientation bin; candidates are then
    ranked and a hit is recorded if the correct candidate's rank falls inside
    each top-k bucket.

    Args:
        ori: ground descriptor, shape (D,) after squeeze.
        option_ori: candidate descriptors, shape (num_candidates, D, num_bins).
        cor_angle: unused here (orientation is ignored by this metric); kept
            so the signature matches fine_val_se.
        cor_index: index of the correct satellite candidate.
        top_list: the four k thresholds, e.g. [1, 5, 10, top1%].

    Returns:
        A length-4 0/1 numpy array marking which of the four top-k buckets
        the ground truth landed in (all zeros on a complete miss).
    """
    # d = 2 - 2 * <ori, option_ori[:, :, k]> for every candidate/bin cell.
    # (The original also computed an orientation-bin index here, but never
    # used it — removed as dead code.)
    ori = torch.squeeze(ori)
    distance_mac = 2 - 2 * torch.einsum("j,ijk->ik", [ori, option_ori])

    acc = np.array([[1, 1, 1, 1],
                    [0, 1, 1, 1],
                    [0, 0, 1, 1],
                    [0, 0, 0, 1],
                    [0, 0, 0, 0]])
    # Best (minimum) distance per candidate, then rank candidates by it.
    best_per_candidate = torch.min(distance_mac, dim=1)
    order = best_per_candidate[0].argsort().detach().cpu().numpy()
    rank = np.where(order == cor_index)[0]
    if rank.size == 0:  # defensive: correct candidate not found
        return acc[-1]

    for i in range(len(top_list)):
        # 0-based rank r is inside top-k iff r < k (the original used <=,
        # which admitted one extra rank into every bucket).
        if rank[0] < top_list[i]:
            return acc[i]
    return acc[-1]


def main(args):
    """Two-stage pipeline: cache coarse-stage features for the whole set,
    then train or evaluate the fine (patch-DSM) model depending on args.mode.

    Side effects: loads checkpoints from args.project_data_dir, writes
    checkpoints/logs during training, and prints progress throughout.
    """
    print(args)
    # import data
    print('===> Loading dataset(s)')
    whole_train_set = dataset.get_whole_train_set(mode=args.mode, fov=args.angle)
    training_data_loader = DataLoader(dataset=whole_train_set,
                                      num_workers=args.num_worker, batch_size=args.batch_size, shuffle=True,
                                      pin_memory=True)

    train_size = whole_train_set.data_size
    print('Done!  total %d pairs of images to train ;' % whole_train_set.data_size)

    # Load the pretrained coarse model; it is only used under no_grad below.
    # NOTE(review): coarse backbone/dim and checkpoint epoch are hard-coded.
    print('loading coarse model..')
    model_coarse = nets.Lsm('resnet18', 512)
    coarse_path = args.project_data_dir + 'coarse/' + '50.pth'
    model_coarse.load_state_dict(torch.load(coarse_path))
    model_coarse = model_coarse.to(device)
    print('Done!')

    print('calculate coarse result')
    angle = []
    # Pre-allocated per-sample buffers: coarse feature maps (kept on CPU to
    # bound GPU memory; widths per FoV come from ANGLE_DIM) and (N, D)
    # global descriptors used to build the retrieval index.
    sat_feature = torch.zeros((train_size, args.dimension, 7, ANGLE_DIM[args.angle][0]))
    grd_feature = torch.zeros((train_size, args.dimension, 7, ANGLE_DIM[args.angle][1]))
    sat_code = np.zeros((train_size, args.dimension), dtype=np.float32)
    grd_code = np.zeros((train_size, args.dimension), dtype=np.float32)
    ii = 0  # running row offset into the pre-allocated buffers
    with torch.no_grad():
        for iteration, (sat_img, grd_img, angling) in enumerate(training_data_loader):
            sat_img = sat_img.to(device)
            grd_img = grd_img.to(device)

            sat_featuring, grd_featuring, sat_coding, grd_coding = model_coarse.forward(sat_img, grd_img)

            # Slicing with args.batch_size is safe for a shorter final batch:
            # the right-hand side fixes the actual assigned length.
            sat_feature[ii: ii + args.batch_size] = sat_featuring.detach()
            grd_feature[ii: ii + args.batch_size] = grd_featuring.detach()
            sat_code[ii: ii + args.batch_size] = sat_coding.cpu().detach()
            grd_code[ii: ii + args.batch_size] = grd_coding.cpu().detach()
            ii += sat_coding.shape[0]
            angle.extend(angling.tolist())
            del sat_img, grd_img
            print('Loading coarse result: [%5d/%d]' % (ii, train_size))
    print('Done!')

    # Release the loader and coarse model before the memory-heavy fine stage.
    del whole_train_set, training_data_loader
    del model_coarse
    torch.cuda.empty_cache()
    print('Build the index for fine train')
    if args.mode == 'train':
        sat_index, cor_index = tool.get_index(sat_code, grd_code)
        # sat_index_, cor_index_ = tool.get_index_test(sat_code, grd_code)
    else:
        sat_index, cor_index = tool.get_index_test(sat_code, grd_code)
    print('Done!')
    del sat_code, grd_code
    torch.cuda.empty_cache()

    if args.mode == 'train':
        # create model
        print('===> creating fine model..')
        # #fix me: put 128 to args
        model = nets.PatchDSM(args.dimension, 128)
        model = model.to(device)
        print('Done! ')

        # load_path = args.project_data_dir + '6_19_10_56/130.pth'
        # print('===> Loading trained model from:', load_path)
        # model = nets.PatchDSM(args.dimension, 128)
        # model.load_state_dict(torch.load(load_path))
        # model = model.to(device)

        print('start training')
        t = time.localtime()
        print('start time is : %d,%d,%d:%d:%d' % (t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec))
        # Checkpoints and loss logs go under a timestamped project directory.
        project_name = str(t.tm_mon) + '_' + str(t.tm_mday) + '_' + str(t.tm_hour) + '_' + str(t.tm_min)
        for train_epoch in range(args.start_epoch, args.number_of_epoch):
            train(train_epoch, model, sat_feature, grd_feature, angle, project_name, sat_index, cor_index)
            #test(model, sat_feature, grd_feature, angle, sat_index_, cor_index_)
    else:
        # NOTE(review): evaluation checkpoint path is hard-coded.
        load_path = args.project_data_dir + '6_21_10_27/20.pth'
        print('===> Loading trained model from:', load_path)
        model = nets.PatchDSM(args.dimension, 128)
        model.load_state_dict(torch.load(load_path))
        model = model.to(device)

        print('start training')
        t = time.localtime()
        print('start time is : %d,%d,%d:%d:%d' % (t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec))

        test(model, sat_feature, grd_feature, angle, sat_index, cor_index)


if __name__ == '__main__':
    # Script entry point; `args` were parsed at module import time above.
    main(args)
