

import sys 
sys.path.append("..") 
sys.path.append("./") 

import numpy as np
import cv2
import h5py
import os
import sys
import torch
import parser_3d
import logging
import sklearn
from os.path import join
from datetime import datetime
from torch.utils.model_zoo import load_url
from google_drive_downloader import GoogleDriveDownloader as gdd

import util

import commons
import datasets.datasets_ws as datasets_ws
from model.search.LocalFeatureSet import LocalFeatureSet



from utils.draw import draw_keypoints

def read_image(impath):
    """Read an image from disk and scale pixel values to [0, 1].

    Note: despite the (historical) name, ``cv2.imread`` without flags loads
    the image in BGR color (3 channels), not grayscale.

    Args:
        impath: Path to the input image file.

    Returns:
        float32 numpy array of shape H x W x 3 with values in [0, 1].

    Raises:
        IOError: If the image cannot be read (missing file, bad format).
    """
    img = cv2.imread(impath)
    if img is None:
        # cv2.imread signals failure by returning None rather than raising.
        raise IOError('Error reading image %s' % impath)
    # Resizing was disabled here; if re-enabled, cv2.INTER_AREA is the
    # preferred interpolation for downscaling, e.g.:
    # img = cv2.resize(img, (640, 640), interpolation=cv2.INTER_AREA)
    return img.astype('float32') / 255.

# Script entry: load precomputed local features for one query image,
# draw its keypoints, and display the annotated image.

# NOTE(review): hard-coded dataset paths — consider CLI arguments instead.
img_name = "@0744006.215@3849072.485@49@S@34.754650330555556@113.66571939166667@115604@.jpg"
feature_dir = "/home/hxf/ws/data/zz_street/images/test/query0/query_features/"
img_dir = "/home/hxf/ws/data/zz_street/images/test/query0/queries/"

feature_set = LocalFeatureSet()
logging.debug("Read features set")
res = feature_set.read(feature_dir)

imgid = feature_set.get_id_by_fname(img_name)

# Fail loudly instead of silently proceeding with an invalid id
# (the original fell through and called get_features_by_id(-1)).
if imgid < 0:
    sys.exit("Image '%s' not found in feature set" % img_name)

features = feature_set.get_features_by_id(imgid)

img = read_image(os.path.join(img_dir, img_name))
pts = features["keypoints"]

img_pts = draw_keypoints(img, pts)

cv2.imshow('img', img_pts)
cv2.waitKey(0)
cv2.destroyAllWindows()  # release the display window after the key press
