
import sys 
sys.path.append("..") 
sys.path.append("./") 

import faiss
import torch
import logging
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset
from model.common.common import timestr
from model.search.LocFeature2ImgIndex import LocFeature2ImgIndex
from model.search.LocalFeatureSet import LocalFeatureSet
from model.postprocess.process import process_featureset, process_image
from model.search.LocalFeatureIndex import LocalFeatureIndex
import yaml

# import torch.profiler


"""
With this script you can evaluate checkpoints or test models from two popular
landmark retrieval github repos.
The first is https://github.com/naver/deep-image-retrieval from Naver labs,
which provides ResNet-50 and ResNet-101 trained with AP on Google Landmarks 18 clean.
$ python eval.py --off_the_shelf=naver --l2=none --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048

The second is https://github.com/filipradenovic/cnnimageretrieval-pytorch from
Radenovic, which provides ResNet-50 and ResNet-101 trained with a triplet loss
on Google Landmarks 18 and sfm120k.
$ python eval.py --off_the_shelf=radenovic_gldv1 --l2=after_pool --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048
$ python eval.py --off_the_shelf=radenovic_sfm --l2=after_pool --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048

Note that although the architectures are almost the same, Naver's
implementation does not use an L2 normalization before/after the GeM aggregation,
while Radenovic's uses it after (and we use it before, which shows better
results in VG)
"""

import os
import sys
import torch
import parser_process
import logging
import sklearn
from os.path import join
from datetime import datetime
from torch.utils.model_zoo import load_url
from google_drive_downloader import GoogleDriveDownloader as gdd


# import test_3d
import util
import commons
import datasets.datasets_ws as datasets_ws
from model import network

from model.extractors.FeatureVectorExtractor import FeatureVectorExtractor

from model.common.Context import Context

from tqdm import tqdm

######################################### SETUP #########################################
# Parse CLI arguments; the YAML config file (args.config) supplies the
# data/feature/index settings used throughout the rest of this script.
args = parser_process.parse_arguments()
# args.resume = "checkpoints/retrievalSfM120k-vgg16-gem-b4dcdc6.pth"
args.dataset_name = "pitts30k"  # NOTE(review): hard-coded override of whatever the parser set — confirm intentional

with open(args.config, "r") as f:
    config = yaml.safe_load(f)
print("check config!! ", config)

# Per-run output directory: log/<save_dir>/<timestamp>
start_time = datetime.now()
args.save_dir = join("log", args.save_dir, start_time.strftime('%Y-%m-%d_%H-%M-%S'))
commons.setup_logging(args.save_dir)
commons.make_deterministic(args.seed)  # fix RNG seeds for reproducible runs
logging.info(f"Arguments: {args}")
logging.info(f"The outputs are being saved in {args.save_dir}")

######################################### MODEL #########################################
# Local-feature extractor moved to the target device for inference.
model = FeatureVectorExtractor(args)
model = model.to(args.device)


# Enable DataParallel after loading checkpoint, otherwise doing it before
# would append "module." in front of the keys of the state dict triggering errors
model = torch.nn.DataParallel(model)

# Optional PCA reduction of the extracted descriptors; args.features_dim is
# shrunk to pca_dim and the PCA is fitted on args.pca_dataset_folder.
if args.pca_dim is None:
    pca = None
else:
    full_features_dim = args.features_dim
    args.features_dim = args.pca_dim
    pca = util.compute_pca(args, model, args.pca_dataset_folder, full_features_dim)

from datasets.dataset_sp import DatasetSP
######################################### DATASETS #########################################

datasets_folder = config["data"]["datasets_folder"]
dataset_name = config["data"]["dataset_name"]
datasets_type = config["data"]["dataset_type"]  # NOTE(review): read but never used — the branch below tests args.datasets_type instead; confirm which is authoritative

if args.datasets_type == "sp":
    test_ds = DatasetSP(args, datasets_folder, dataset_name, "test")
else:
    test_ds = datasets_ws.BaseDataset(args, datasets_folder, dataset_name, "test")
logging.info(f"Test set: {test_ds}")

######################################### TEST on TEST SET #########################################
# Bundle everything the search/index helpers need into one Context object.
context = Context()
context.dataset = test_ds
context.fve_model = model
context.test_method = args.test_method
context.pca = pca
context.config = config
context.args = args
# config.args = args

def _getBaseFeatureSet(cfg, ctx):
    """Load the database local-feature set from disk, extracting it if absent.

    Tries ``LocalFeatureSet.read()`` first; on failure (return value < 0) runs
    the feature-vector extractor over every database image of ``ctx.dataset``
    and accumulates the per-image feature dicts into the set.

    Args:
        cfg: parsed YAML config dict (uses ``cfg["device"]`` and
            ``cfg["data"]["num_workers"]`` / ``["infer_batch_size"]``).
        ctx: Context carrying the dataset and the extractor model.

    Returns:
        The populated LocalFeatureSet.
    """
    feature_set = LocalFeatureSet(cfg, ctx)
    if feature_set.read() < 0:
        eval_ds = ctx.dataset
        model = ctx.fve_model.eval()
        # Database images share one resolution, so a plain hard resize suffices.
        eval_ds.test_method = "hard_resize"
        device = cfg["device"]
        database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))
        database_dataloader = DataLoader(dataset=database_subset_ds,
                                         num_workers=cfg["data"]["num_workers"],
                                         batch_size=cfg["data"]["infer_batch_size"],
                                         pin_memory=(device == "cuda"))
        with torch.no_grad():
            logging.debug("Extracting database features for evaluation/testing")
            for inputs, indices in tqdm(database_dataloader, ncols=100):
                features = model(inputs.to(device))
                descriptors = features["descriptors"]
                if torch.is_tensor(descriptors):
                    descriptors = descriptors.cpu().numpy()
                    features["descriptors"] = descriptors

                # NOTE(review): only indices[0] is used, so the whole batch is
                # attributed to a single image id — correct only when
                # infer_batch_size == 1; confirm the config guarantees that.
                imgid = indices.numpy()[0]
                feature_set.add_features(imgid, features)

    return feature_set

# Build (or load from disk) the database-side index of local features.
baseIndex = LocalFeatureIndex(context)
if baseIndex.read() < 0:
    baseFeatureSet = _getBaseFeatureSet(config,context)
    baseIndex.build_from_featureset(baseFeatureSet)


# Query-side features are precomputed; read them from query_features_dir.
query_feature_set = LocalFeatureSet(config,context)
query_feature_set.set_feature_dir(config["features"]["query_features_dir"])
query_feature_set.read()


# Maps query image id -> top-k predicted database image ids.
query_result_dict = dict()

# Profile the search loop; traces go to ./log/profiler/<timestamp> for TensorBoard.
with torch.profiler.profile(
schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
on_trace_ready=torch.profiler.tensorboard_trace_handler("./log/profiler/" + timestr()),
record_shapes=True,
profile_memory=True,
with_stack=True
) as profiler:
    
    featureset= query_feature_set.get_features()
    count = len(featureset.items())
    pbar = tqdm(total=count)
    # For each query image, search the database index with its local features.
    for imgid,features in featureset.items():

        query_index = imgid 
        descriptors = features["descriptors"]
        # if(torch.is_tensor(descriptors)):
        #     descriptors = descriptors.cpu().numpy()
        #     features["descriptors"] = descriptors
        # if pca is not None:
        #     descriptors = pca.transform(descriptors)
        #     features["descriptors"] = descriptors
        
        # distances, predictions = faiss_index.search(features, max(args.recall_values))
        # predictions_ids = ind_index.searchArr2d(predictions)
        # predictions_ids_top_k = ind_index.most_common(predictions_ids,max(args.recall_values))
        num = max(config["val"]["recall_values"])
        predictions_ids_top_k = baseIndex.search(features,num)
        
        query_result_dict[imgid] = predictions_ids_top_k
        profiler.step()
        pbar.update()
    
# Recall@N: a query scores for recalls[i:] as soon as one of its top-n
# predictions is a true positive (eval_ds.query_positives non-empty).
eval_ds = test_ds
recalls = np.zeros(len(args.recall_values))  # NOTE(review): args.recall_values here vs config["val"]["recall_values"] above — confirm they agree
for queryid, pred in query_result_dict.items():
    for i, n in enumerate(args.recall_values):
        q_res = eval_ds.query_positives(queryid,pred[:n])
        if q_res.shape[0] > 0:
            recalls[i:] += 1
            break
# recalls = recalls / eval_ds.queries_num * 100
# NOTE(review): with the normalization above commented out, the printed
# recalls are raw query counts, not percentages.
recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.recall_values, recalls)])
print(recalls_str)
    # return recalls, recalls_str

pass
            

def test(args, context):
    """Compute features of the given dataset and compute the recalls.

    Extracts (or loads from disk) local features for the database images,
    optionally post-processes them, then runs every query image through the
    model and searches the feature index for its top matches.

    Args:
        args: parsed command-line arguments (device, batch sizes,
            recall_values, post-process settings, ...).
        context: Context carrying the dataset, the extractor model, the PCA
            transform (or None) and the YAML config.

    Returns:
        (recalls, recalls_str): an array of un-normalized Recall@N counts and
        its human-readable string form.
    """
    eval_ds = context.dataset
    pca = context.pca
    model = context.fve_model.eval()
    test_method = context.test_method

    with torch.no_grad():
        logging.debug("Extracting database features for evaluation/testing")
        # For database use "hard_resize", although it usually has no effect because database images have same resolution
        eval_ds.test_method = "hard_resize"
        database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))
        # BUGFIX: this DataLoader was built from queries_subset_ds, a name only
        # defined further below (NameError); the database subset is intended here.
        database_dataloader = DataLoader(dataset=database_subset_ds, num_workers=args.num_workers,
                                         batch_size=args.infer_batch_size, pin_memory=(args.device == "cuda"))

        # NOTE(review): baseIndex / feature_set are built but never used below
        # (the search goes through feature_index) — confirm this branch is
        # still needed or drop it.
        # Constructor made consistent with the module-level call
        # LocalFeatureIndex(context); was LocalFeatureIndex(args, context).
        baseIndex = LocalFeatureIndex(context)
        if baseIndex.read() < 0:
            # Was a call to undefined _getFeatureSet(args, context);
            # _getBaseFeatureSet(cfg, ctx) is the matching helper in this file.
            feature_set = _getBaseFeatureSet(context.config, context)

        # Constructor made consistent with the module-level call
        # LocalFeatureSet(config, context); was LocalFeatureSet(args).
        feature_index = LocalFeatureSet(context.config, context)
        logging.debug("Read features set")
        res = feature_index.read()
        # BUGFIX: without parentheses the conditional expression swallowed the
        # prefix, logging just "FALSE!" on failure instead of the full message.
        logging.debug("Read features set done! " + ("SUCCESS" if res >= 0 else "FALSE!"))
        if res < 0:
            for inputs, indices in tqdm(database_dataloader, ncols=100):
                features = model(inputs.to(args.device))

                descriptors = features["descriptors"]
                if torch.is_tensor(descriptors):
                    descriptors = descriptors.cpu().numpy()
                    features["descriptors"] = descriptors
                if pca is not None:
                    descriptors = pca.transform(descriptors)
                    features["descriptors"] = descriptors

                # NOTE(review): only indices[0] becomes the image id — correct
                # only when infer_batch_size == 1; TODO confirm.
                imgid = indices.numpy()[0]
                feature_index.add_features(imgid, features)

            # Persist extracted features so the next run can skip extraction.
            feature_index.write()

        if len(args.fve_post_process) > 0:
            # BUGFIX: was process_feature_set, which is not imported; the module
            # imports process_featureset from model.postprocess.process.
            feature_index = process_featureset(args.fve_post_process, args, feature_index, context=context)

        logging.debug("Extracting queries features for evaluation/testing")
        queries_infer_batch_size = 1  # queries may differ in resolution -> batch of 1
        eval_ds.test_method = test_method

        queries_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num, eval_ds.database_num + eval_ds.queries_num)))
        queries_dataloader = DataLoader(dataset=queries_subset_ds, num_workers=args.num_workers,
                                        batch_size=queries_infer_batch_size, pin_memory=(args.device == "cuda"))

        # Maps query image id -> top-k predicted database image ids.
        query_result_dict = dict()

        # Profile the query loop; traces land in ./log/profiler/<timestamp>.
        with torch.profiler.profile(
                schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
                on_trace_ready=torch.profiler.tensorboard_trace_handler("./log/profiler/" + timestr()),
                record_shapes=True,
                profile_memory=True,
                with_stack=True
        ) as profiler:
            for inputs, indices in tqdm(queries_dataloader, ncols=100):
                queryid = indices.numpy()[0]

                if test_method == "five_crops" or test_method == "nearest_crop" or test_method == 'maj_voting':
                    inputs = torch.cat(tuple(inputs))  # shape = 5*bs x 3 x 480 x 480
                features = model(inputs.to(args.device))

                descriptors = features["descriptors"]
                if torch.is_tensor(descriptors):
                    descriptors = descriptors.cpu().numpy()
                    features["descriptors"] = descriptors
                if pca is not None:
                    descriptors = pca.transform(descriptors)
                    features["descriptors"] = descriptors

                if len(args.fve_post_process_query) > 0:
                    features = process_image(args.fve_post_process_query, args, features)
                    descriptors = features["descriptors"]

                predictions_ids_top_k = feature_index.search_image(descriptors)
                query_result_dict[queryid] = predictions_ids_top_k
                profiler.step()

            # Recall@N: a query scores for recalls[i:] as soon as one of its
            # top-n predictions is a true positive.
            recalls = np.zeros(len(args.recall_values))
            for queryid, pred in query_result_dict.items():
                for i, n in enumerate(args.recall_values):
                    q_res = eval_ds.query_positives(queryid, pred[:n])
                    if q_res.shape[0] > 0:
                        recalls[i:] += 1
                        break
            # recalls = recalls / eval_ds.queries_num * 100
            # NOTE(review): with the normalization commented out these are raw
            # query counts, not percentages.
            recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.recall_values, recalls)])
            print(recalls_str)
            return recalls, recalls_str

    #         for i, n in enumerate(args.recall_values):
    #             if np.any(np.in1d(pred[:n], positives_per_query[query_index])):
    #                 recalls[i:] += 1
    #                 break
    # # Divide by the number of queries*100, so the recalls are in percentages
    # recalls = recalls / eval_ds.queries_num * 100
    # recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.recall_values, recalls)])
        
#     queries_features = all_features[eval_ds.database_num:]
#     database_features = all_features[:eval_ds.database_num]
    

#     del database_features, all_features
    
#     logging.debug("Calculating recalls")
#     distances, predictions = faiss_index.search(queries_features, max(args.recall_values))
    
#     if test_method == 'nearest_crop':
#         distances = np.reshape(distances, (eval_ds.queries_num, 20 * 5))
#         predictions = np.reshape(predictions, (eval_ds.queries_num, 20 * 5))
#         for q in range(eval_ds.queries_num):
#             # sort predictions by distance
#             sort_idx = np.argsort(distances[q])
#             predictions[q] = predictions[q, sort_idx]
#             # remove duplicated predictions, i.e. keep only the closest ones
#             _, unique_idx = np.unique(predictions[q], return_index=True)
#             # unique_idx is sorted based on the unique values, sort it again
#             predictions[q, :20] = predictions[q, np.sort(unique_idx)][:20]
#         predictions = predictions[:, :20]  # keep only the closer 20 predictions for each query
#     elif test_method == 'maj_voting':
#         distances = np.reshape(distances, (eval_ds.queries_num, 5, 20))
#         predictions = np.reshape(predictions, (eval_ds.queries_num, 5, 20))
#         for q in range(eval_ds.queries_num):
#             # votings, modify distances in-place
#             top_n_voting('top1', predictions[q], distances[q], args.majority_weight)
#             top_n_voting('top5', predictions[q], distances[q], args.majority_weight)
#             top_n_voting('top10', predictions[q], distances[q], args.majority_weight)

#             # flatten dist and preds from 5, 20 -> 20*5
#             # and then proceed as usual to keep only first 20
#             dists = distances[q].flatten()
#             preds = predictions[q].flatten()

#             # sort predictions by distance
#             sort_idx = np.argsort(dists)
#             preds = preds[sort_idx]
#             # remove duplicated predictions, i.e. keep only the closest ones
#             _, unique_idx = np.unique(preds, return_index=True)
#             # unique_idx is sorted based on the unique values, sort it again
#             # here the row corresponding to the first crop is used as a
#             # 'buffer' for each query, and in the end the dimension
#             # relative to crops is eliminated
#             predictions[q, 0, :20] = preds[np.sort(unique_idx)][:20]
#         predictions = predictions[:, 0, :20]  # keep only the closer 20 predictions for each query

#     #### For each query, check if the predictions are correct
#     positives_per_query = eval_ds.get_positives()
#     # args.recall_values by default is [1, 5, 10, 20]
#     recalls = np.zeros(len(args.recall_values))
#     for query_index, pred in enumerate(predictions):
#         for i, n in enumerate(args.recall_values):
#             if np.any(np.in1d(pred[:n], positives_per_query[query_index])):
#                 recalls[i:] += 1
#                 break
#     # Divide by the number of queries*100, so the recalls are in percentages
#     recalls = recalls / eval_ds.queries_num * 100
#     recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.recall_values, recalls)])
#     return recalls, recalls_str


# def top_n_voting(topn, predictions, distances, maj_weight):
#     if topn == 'top1':
#         n = 1
#         selected = 0
#     elif topn == 'top5':
#         n = 5
#         selected = slice(0, 5)
#     elif topn == 'top10':
#         n = 10
#         selected = slice(0, 10)
#     # find predictions that repeat in the first, first five,
#     # or fist ten columns for each crop
#     vals, counts = np.unique(predictions[:, selected], return_counts=True)
#     # for each prediction that repeats more than once,
#     # subtract from its score
#     for val, count in zip(vals[counts > 1], counts[counts > 1]):
#         mask = (predictions[:, selected] == val)
#         distances[:, selected][mask] -= maj_weight * count/n
