import os
import sys

import sys 
# sys.path.append("..") 
sys.path.append("./") 
sys.path.append("./model")

import torch
import argparse
import logging
import sklearn
from os.path import join
from datetime import datetime
from torch.utils.model_zoo import load_url
from google_drive_downloader import GoogleDriveDownloader as gdd


# import test_3d
# import util
import commons
import datasets.datasets_ws as datasets_ws
from model import network

from model.extractors.FeatureVectorExtractor import FeatureVectorExtractor

# from model.common.Context import Context

from tqdm import tqdm
import yaml

from model.search.LocalFeatureSet2 import LocalFeatureSet2
from torch.utils.data.dataloader import DataLoader
import model.utils.parse_args as parse_args

# Shared mutable namespace threaded through the pipeline (args, config,
# save_dir, dataset, model are attached to it as the script progresses).
context = argparse.Namespace()
args,config = parse_args.parse_arguments("./configs/test_3dtiles_3d.yaml")
context.args = args
context.config = config

# print("check config!! ", config)

# Timestamped run directory under log/<save_dir>/ so repeated runs never collide.
start_time = datetime.now()
context.save_dir = join("log", args.save_dir, start_time.strftime('%Y-%m-%d_%H-%M-%S'))
commons.setup_logging(context.save_dir)
# commons.make_deterministic(args.seed)
logging.info(f"Arguments: {args}")
logging.info(f"The outputs are being saved in {context.save_dir}")



###########################################FE###########################################
# Build the feature-vector extractor and move it to the configured device.
model = FeatureVectorExtractor(args)
model = model.to(args.device)


# Enable DataParallel after loading checkpoint, otherwise doing it before
# would append "module." in front of the keys of the state dict triggering errors
model = torch.nn.DataParallel(model)


####################################

from datasets.dataset_3dtiles import Dataset3DTiles

if args.data.dataset_type == "3DTiles":
    test_ds = Dataset3DTiles(context)
else:
    # Fail fast with a clear message: previously an unsupported dataset_type
    # fell through and raised a confusing NameError at `context.dataset = test_ds`.
    raise ValueError(
        f"Unsupported dataset_type: {args.data.dataset_type!r} (expected '3DTiles')"
    )

# Expose the dataset and the (wrapped) model through the shared context.
context.dataset = test_ds
context.fve_model = model
# context.test_method = args.test_method
# context.test_method = args.test_method

def _getBaseFeatureSet(args, ctx):
    """Return the database local-feature set, loading it from disk if cached.

    Tries to read a previously extracted feature set from
    ``args.features.features_dir``; on a cache miss it runs the feature
    extractor over the whole database subset of ``ctx.dataset`` and stores
    each image's features (with id/name/size attributes) into the set.
    """
    feature_set = LocalFeatureSet2(ctx)
    feature_set.set_feature_dir(args.features.features_dir)
    if feature_set.read() >= 0:
        # Cache hit: features were loaded from disk, nothing to extract.
        return feature_set

    eval_ds = ctx.dataset
    # pca = ctx.pca
    extractor = ctx.fve_model.eval()
    # test_method = ctx.test_method
    eval_ds.test_method = "hard_resize"
    device = args.device
    db_subset = eval_ds.getDatabaseSubset()
    loader = DataLoader(
        dataset=db_subset,
        num_workers=args.data.num_workers,
        batch_size=args.data.infer_batch_size,
        pin_memory=(device == "cuda"),
    )
    with torch.no_grad():
        logging.debug("Extracting database features for evaluation/testing")
        for inputs, indices in tqdm(loader, ncols=100):
            # NOTE(review): only the first index of the batch is used —
            # presumably infer_batch_size is 1 here; confirm.
            image_id = indices.numpy()[0]
            image_name = eval_ds.get_filename_byid(image_id)

            if torch.is_tensor(inputs):
                inputs = inputs.to(device)
            features = extractor(inputs)

            descriptors = features["descriptors"]
            if descriptors is None:
                # Extractor produced nothing for this image; skip it.
                continue

            if torch.is_tensor(descriptors):
                features["descriptors"] = descriptors.cpu().numpy()

            # assumes inputs is a mapping with an 'image' tensor whose
            # dims 1 and 2 are height and width — TODO confirm layout
            height = inputs['image'].shape[1]
            width = inputs['image'].shape[2]

            features["image_id"] = image_id
            features["image_name"] = image_name
            features["attrs"] = {
                "id": image_id,
                "image_width": width,
                "image_height": height,
            }
            feature_set.add_features(image_id, features)

    return feature_set

# Compute the 3D coordinates of every feature point stored in features.h5.
def _getPoint3DCoordinates(args, context):
    """Return 0 on success, or -1 if the feature set cannot be read from disk.

    NOTE(review): LocalFeatureSet2 is constructed here without a ctx
    argument, unlike the call in _getBaseFeatureSet — confirm the
    constructor accepts zero arguments.
    """
    feature_set = LocalFeatureSet2()
    feature_set.set_feature_dir(args.features.features_dir)
    return -1 if feature_set.read() < 0 else 0

base_feature_set = _getBaseFeatureSet(args,context)