
import sys 
sys.path.append("..") 
sys.path.append("./") 

# import faiss
# import torch
import logging
import numpy as np
# from tqdm import tqdm
# from torch.utils.data import DataLoader
# from torch.utils.data.dataset import Subset
from model.common.common import timestr
# from model.search.LocFeature2ImgIndex import LocFeature2ImgIndex
# from model.search.LocalFeatureSet import LocalFeatureSet
# from model.postprocess.process import process_featureset, process_image
# from model.search.LocalFeatureIndex import LocalFeatureIndex
import yaml

# import torch.profiler


"""
With this script you can evaluate checkpoints or test models from two popular
landmark retrieval github repos.
The first is https://github.com/naver/deep-image-retrieval from Naver labs,
which provides ResNet-50 and ResNet-101 trained with AP on Google Landmarks 18 clean.
$ python eval.py --off_the_shelf=naver --l2=none --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048

The second is https://github.com/filipradenovic/cnnimageretrieval-pytorch from
Radenovic, which provides ResNet-50 and ResNet-101 trained with a triplet loss
on Google Landmarks 18 and sfm120k.
$ python eval.py --off_the_shelf=radenovic_gldv1 --l2=after_pool --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048
$ python eval.py --off_the_shelf=radenovic_sfm --l2=after_pool --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048

Note that although the architectures are almost the same, Naver's
implementation does not use an l2 normalization before/after the GeM aggregation,
while Radenovic's uses it after (and we use it before, which shows better
results in VG)
"""

import os
import sys 

sys.path.append("..") 
sys.path.append("./") 
sys.path.append("../model")

import argparse
import model.utils.parse_args as parse_args

import logging

from os.path import join
from datetime import datetime



# import test_3d
# import util
# import commons
# import datasets.datasets_ws as datasets_ws
# from model import network

# from model.extractors.FeatureVectorExtractor import FeatureVectorExtractor

# from model.common.Context import Context

from tqdm import tqdm

from model.search.LocalFeatureSet2 import LocalFeatureSet2
from model.search.GlobalFeatureIndex import GlobalFeatureIndex
######################################### SETUP #########################################

# Parse the CLI/YAML configuration and bundle it into a shared context namespace.
args, config = parse_args.parse_arguments("../configs/test_image_search.yaml")
context = argparse.Namespace()
context.args = args
context.config = config


# Each run logs into its own timestamped subdirectory under "log/".
start_time = datetime.now()
run_stamp = start_time.strftime('%Y-%m-%d_%H-%M-%S')
args.save_dir = join("log", args.save_dir, run_stamp)
# commons.setup_logging(args.save_dir)
# commons.make_deterministic(args.seed)
logging.info(f"Arguments: {args}")
logging.info(f"The outputs are being saved in {args.save_dir}")


from datasets.datasets_images import BaseDataset

# Build the evaluation split (database + query images) described by the config.
test_ds = BaseDataset(args, args.data.datasets_folder, args.data.dataset_name, "test")
logging.info(f"Test set: {test_ds}")


######################################### FeatureSet #########################################

def _load_featureset(feature_dir):
    """Read a LocalFeatureSet2 stored under *feature_dir* and return it."""
    featureset = LocalFeatureSet2()
    featureset.set_feature_dir(feature_dir)
    featureset.read()
    return featureset

# Database and query descriptors live side by side under the dataset folder.
_dataset_root = join(args.data.datasets_folder, args.data.dataset_name)
db_featureset = _load_featureset(join(_dataset_root, args.features.features_dir))
query_featureset = _load_featureset(join(_dataset_root, args.features.query_features_dir))


######################################### INDEXS #########################################

# Index every database global descriptor for nearest-neighbour search.
db_index = GlobalFeatureIndex()
db_index.build_from_featureset(db_featureset)

######################################### TEST on TEST SET #########################################

query_result_dict = {}

# Retrieve the top-20 database candidates for every query's global descriptor.
query_features = query_featureset.get_features()
# Bug fix: tqdm(len(...)) passed an int as the *iterable*, so the bar had no
# total; `total=` is the correct keyword for manual .update() usage.
pbar1 = tqdm(total=len(query_features))
for feat_id, query in query_features.items():  # avoid shadowing builtin `id`
    # The index search expects a (1, dim) row matrix, not a flat vector.
    global_desc = query['global_descriptor'].reshape(1, -1)
    diss, preds = db_index.search(global_desc, 20)
    query_result_dict[feat_id] = preds[0]
    pbar1.update()
pbar1.close()
    
######################################### TEST on TEST SET #########################################


# Evaluate Recall@N over the retrieved candidates.
num_queries = len(query_result_dict)
# Bug fix: tqdm(len(...)) passed an int as the *iterable*; use `total=`.
pbar = tqdm(total=num_queries)
recalls = np.zeros(len(args.val.recall_values))
for queryid, pred in query_result_dict.items():
    # Query ids are offset by the database size in the ground-truth lookup
    # (queries are stored after the database images in the dataset).
    queryid = queryid + test_ds.database_num
    pbar.update()
    for i, n in enumerate(args.val.recall_values):
        q_res = test_ds.query_positives(queryid, pred[:n])
        if q_res.shape[0] > 0:
            # A hit at rank <= n also counts for every larger recall cutoff.
            recalls[i:] += 1
            break
pbar.close()
# Bug fix: the normalization was commented out because it referenced a
# nonexistent `eval_ds`; without it the R@N values printed raw hit counts
# rather than the percentages the format string implies. Normalize by the
# actual number of evaluated queries (guarding against an empty query set).
if num_queries > 0:
    recalls = recalls / num_queries * 100
recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.val.recall_values, recalls)])
print(recalls_str)