import sys
sys.path.append("..") 
sys.path.append("./") 
# sys.path.append("./model")
import faiss
import torch
import logging
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset
from model.common.common import timestr
from model.search.LocFeature2ImgIndex import LocFeature2ImgIndex
from model.search.LocalFeatureSet import LocalFeatureSet
from model.postprocess.process import process_featureset, process_image
from model.search.LocalFeatureIndex import LocalFeatureIndex
from loc.tools import Tools as tool
from loc.Space_resection import Cal_pos
import yaml
from sklearn.cluster import KMeans
import time
# import torch.profiler


"""
With this script you can evaluate checkpoints or test models from two popular
landmark retrieval github repos.
The first is https://github.com/naver/deep-image-retrieval from Naver labs,
which provides ResNet-50 and ResNet-101 trained with AP on Google Landmarks 18 clean.
$ python eval.py --off_the_shelf=naver --l2=none --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048

The second is https://github.com/filipradenovic/cnnimageretrieval-pytorch from
Radenovic, which provides ResNet-50 and ResNet-101 trained with a triplet loss
on Google Landmarks 18 and sfm120k.
$ python eval.py --off_the_shelf=radenovic_gldv1 --l2=after_pool --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048
$ python eval.py --off_the_shelf=radenovic_sfm --l2=after_pool --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048

Note that although the architectures are almost the same, Naver's
implementation does not use a l2 normalization before/after the GeM aggregation,
while Radenovic's uses it after (and we use it before, which shows better
results in VG)
"""

import os
import sys
import torch
import logging
from os.path import join
from datetime import datetime
from torch.utils.model_zoo import load_url

import math

import commons
import cv2



from tqdm import tqdm


######################################### SETUP #########################################
from model.search.LocalFeatureSet2 import LocalFeatureSet2
import argparse
import model.utils.parse_args as parse_args
def result_sort(dis, index, key, result_dis, result_index, result_key, topn=30):
    """Merge a new batch of search results into the running per-query top-N.

    All arguments are 2-D, one row per query. The new columns from
    ``dis``/``index``/``key`` are appended to the accumulated results, each
    row is re-sorted by ascending distance, and only the best ``topn``
    columns are kept.

    Args:
        dis: new distances, shape (n_queries, n_new); n_new may be 0.
        index: result ids aligned column-wise with ``dis``.
        key: result keys aligned column-wise with ``dis``.
        result_dis / result_index / result_key: accumulated results so far.
        topn: number of best (smallest-distance) columns to keep per row.

    Returns:
        Tuple ``(result_dis, result_index, result_key)`` trimmed to ``topn``
        columns. New arrays are returned; the inputs are not mutated
        (the original implementation reordered ``result_*`` rows in place).
    """
    result_dis = np.asarray(result_dis)
    result_index = np.asarray(result_index)
    result_key = np.asarray(result_key)
    if dis.shape[1]:  # skip empty batches with zero new columns
        result_dis = np.concatenate([result_dis, dis], axis=1)
        result_index = np.concatenate([result_index, index], axis=1)
        result_key = np.concatenate([result_key, key], axis=1)
    # Row-wise ascending sort by distance. take_along_axis applies the same
    # per-row permutation to all three arrays, replacing the Python loop.
    order = np.argsort(result_dis, axis=1)
    result_dis = np.take_along_axis(result_dis, order, axis=1)[:, :topn]
    result_index = np.take_along_axis(result_index, order, axis=1)[:, :topn]
    result_key = np.take_along_axis(result_key, order, axis=1)[:, :topn]
    return result_dis, result_index, result_key

###############get_features#####################

# Load the cached local-feature set for the configured dataset.
# NOTE(review): the original (Chinese) comment said this computes the 3-D
# coordinate of every keypoint in features.h5, but the code only reads the
# stored feature set — confirm the intended behavior.
def _getFeatures(args):
    """Read the dataset's local-feature store and return its features.

    Returns -1 when the feature directory cannot be read (mirrors the
    negative-return error convention of ``LocalFeatureSet2.read``).
    """
    store = LocalFeatureSet2()
    feature_dir = join(args.data.datasets_folder,
                       args.data.dataset_name,
                       args.features.features_dir)
    store.set_feature_dir(feature_dir)
    if store.read() < 0:
        return -1
    return store.get_features()

def random_points(shape, num):
    """Draw ``num`` random (row, col) positions inside an image of ``shape``.

    Returns a tuple of ``num`` two-element lists: a random row index in
    [0, shape[0]) followed by a random column index in [0, shape[1]).
    Uses the global NumPy RNG state.
    """
    rows = np.random.randint(0, shape[0], num)
    cols = np.random.randint(0, shape[1], num)
    return tuple([int(r), int(c)] for r, c in zip(rows, cols))

def extract_ground(ground, k, i):
    """Gather the 3-D point ``ground[k[j]][i[j]]`` for every j.

    Args:
        ground: indexable collection where ``ground[a][b]`` is a length-3
            array-like point.
        k: first-level indices, one per output row.
        i: second-level indices, aligned with ``k``.

    Returns:
        Array of shape (len(k), 3) holding the gathered points (float64 when
        the inputs promote, matching the original empty-float64 seed array).
    """
    start = time.time()
    # Collect all rows first and concatenate once: the original concatenated
    # inside the loop, copying the growing result every iteration (O(n^2)).
    rows = [ground[k[j]][i[j]].reshape(1, 3) for j in range(len(k))]
    result = np.concatenate([np.array([]).reshape(-1, 3)] + rows, axis=0)
    # Debug timing print kept from the original (stdout side effect).
    print(time.time() - start)
    return result



# Build a shared context namespace and load the run configuration.
context = argparse.Namespace()
# Move to the parent directory so config-relative paths resolve —
# assumes the script is launched from a subdirectory; TODO confirm intended cwd.
os.chdir(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
# NOTE(review): hard-coded absolute config path — only works on this machine;
# consider taking it from the command line.
args,config = parse_args.parse_arguments("/mnt/B0AAF35FAAF32110/lsvn/3_3dtiles_vpr/3dtiles-vpr/configs/test_3dtiles_3d.yaml")
context.args = args
context.config = config


# start_time = datetime.now()
# args.save_dir = join("log", args.save_dir, start_time.strftime('%Y-%m-%d_%H-%M-%S'))
# commons.setup_logging(args.save_dir)
# # commons.make_deterministic(args.seed)
# logging.info(f"Arguments: {args}")
# logging.info(f"The outputs are being saved in {args.save_dir}")



######################################### TEST on TEST SET #########################################


# config.args = args


def ecef_to_lla(x, y, z):
    """Convert ECEF coordinates (meters) to WGS-84 geodetic coordinates.

    Uses Bowring's closed-form approximation (no iteration).

    Args:
        x, y, z: ECEF coordinates in meters.

    Returns:
        ``(lat, lon, alt)`` — latitude and longitude in degrees,
        altitude above the ellipsoid in meters.

    Note:
        The altitude formula ``p / cos(lat) - N`` is ill-conditioned near the
        poles (p → 0); results there are unreliable. This matches the
        original implementation's behavior.
    """
    # WGS-84 ellipsoid constants. The original computed the semi-minor axis
    # and eccentricity twice (equivalently); compute each exactly once here.
    a = 6378137.0                      # semi-major axis (m)
    f = 1 / 298.257223563              # flattening
    b = (1 - f) * a                    # semi-minor axis (m)
    e = math.sqrt(1 - (b / a) ** 2)    # first eccentricity
    ep = math.sqrt((a * a - b * b) / (b * b))  # second eccentricity

    p = math.hypot(x, y)               # distance from the Earth's axis
    th = math.atan2(a * z, b * p)      # Bowring's auxiliary angle
    lon = math.atan2(y, x)
    lat = math.atan2(z + ep * ep * b * math.sin(th) ** 3,
                     p - e * e * a * math.cos(th) ** 3)
    # Prime-vertical radius of curvature at this latitude.
    N = a / math.sqrt(1 - e * e * math.sin(lat) ** 2)
    alt = p / math.cos(lat) - N
    return math.degrees(lat), math.degrees(lon), alt

# Column-major 4x4 transform taking local tile coordinates to ECEF (the last
# row before transposing is the tile origin in ECEF meters).
# NOTE(review): hard-coded values — presumably from the 3D Tiles root
# transform; confirm provenance.
_LOCAL_TO_ECEF = np.array([-0.916833527953752, -0.399269685826357, 0.0, 0.0,
                           0.227936566197776, -0.523405341190006, 0.821030919395695, 0.0,
                           -0.327812757240844, 0.752748674388667, 0.570883726687194, 0.0,
                           -2093119.2579733, 4806380.2032549, 3620751.16340286, 1.0]
                          ).reshape([4, 4]).transpose()
# The matrix is constant, so invert it once at import time instead of on
# every call (the original recomputed the inverse per invocation).
_ECEF_TO_LOCAL = np.linalg.inv(_LOCAL_TO_ECEF)


def ecef_to_local(x, y, z):
    """Map an ECEF point (meters) into the local tile frame.

    Args:
        x, y, z: ECEF coordinates in meters.

    Returns:
        Length-4 homogeneous vector ``[x', y', z', 1]`` in the local frame.
    """
    return np.matmul(_ECEF_TO_LOCAL, np.array([x, y, z, 1.0]))



# Name of the query image (placeholder label).
image_name = "test"
# Hand-picked 2-D pixel coordinates of control points in the full-resolution
# query image (row order matches ground_0 / obj_points below).
img_points = np.array([
[771,448 ],
[674,4146],
[2129,4059],
[3155,1080],
[4125,2895],
[6027,292],
[6060,4187],
[6841,918],
[7070,4095]
],dtype=np.float32)
# Matching 3-D control-point coordinates — presumably ECEF meters (magnitudes
# match the tile origin used in ecef_to_local); TODO confirm datum.
ground_0 = np.array([
[-2093057.41,4806382.43,3620977.84],
[-2093172.55,4806300.63,3620978.08],
[-2093158.92,4806277.46,3621016.38],
[-2093040.00,4806313.32,3621037.53],
[-2093105.69,4806261.03,3621069.05],
[-2092999.52,4806278.49,3621140.71],
[-2093134.14,4806210.09,3621119.47],
[-2092998.95,4806239.99,3621158.41],
[-2093124.18,4806194.20,3621146.53]
],dtype=np.float32)

# Initialized with the same values as ground_0, then overwritten in the loop
# below with local-frame coordinates.
obj_points = np.array([
[-2093057.41,4806382.43,3620977.84],
[-2093172.55,4806300.63,3620978.08],
[-2093158.92,4806277.46,3621016.38],
[-2093040.00,4806313.32,3621037.53],
[-2093105.69,4806261.03,3621069.05],
[-2092999.52,4806278.49,3621140.71],
[-2093134.14,4806210.09,3621119.47],
[-2092998.95,4806239.99,3621158.41],
[-2093124.18,4806194.20,3621146.53]
],dtype=np.float32)

# Convert each control point into the local tile frame so the solvers work
# with small, well-conditioned coordinates (keeps only x, y, z of the
# homogeneous result). Note: obj_points is float32, so the float64 results
# are truncated on assignment.
for i in range(len(ground_0)):
    gi = ground_0[i]
    loc = ecef_to_local(gi[0],gi[1],gi[2])
    obj_points[i] = loc[0:3]

# center = np.average(obj_points,axis=0)
# obj_points = obj_points - center

# tool.draw_poi_cloud(ground, [])
# Query image dimensions in pixels (full resolution).
image_width = 8192
image_height = 5460



####################### test 1: space-resection solver ########################
# Estimate the camera position from the 2-D/3-D correspondences using the
# project's space-resection implementation (semantics defined in
# loc.Space_resection.Cal_pos — not visible here).
cal_pos = Cal_pos()
pos, ground, dis, pho_dis = cal_pos.get_pos(img_points,obj_points,image_width,image_height)
print(pos)


####################### test 2: OpenCV PnP solve ########################

# (Earlier full-resolution intrinsics kept for reference.)
# F=8529.19657994319
# cameraMatrix = np.array([
#     [F,0,image_width],
#     [0,F,image_height],
#     [0,0,1]
# ],dtype=np.float32)
# distCoeffs = np.array([0,0,0,0],dtype=np.float32)

# cameraMatrix =np.array([
# [8529.19657994319, 0, 4096+9.89224588620221],
# [0, 8529.19657994319, 2730+7.35020245943671],
# [0, 0, 1]], dtype=np.float32)
# distCoeffs = np.array([-0.0471627783238073,-0.0154369488516742, 0.000200148071496319, 0.000117138032146703,-0.0454659387045057],dtype=np.float32)
# # distCoeffs = np.array([0,0, 0, 0,0],dtype=np.float32)

# Work at 1024-px width: scale the calibrated focal length and principal
# point by the same factor as the image.
sc = 1024.0 / 8192.0
cameraMatrix =np.array([
[8529.19657994319 * sc, 0, 4105.89224588620221*sc],
[0, 8529.19657994319*sc, 2737.35020245943671*sc],
[0, 0, 1]], dtype=np.float32)
# Calibrated distortion coefficients in OpenCV order (k1, k2, p1, p2, k3).
distCoeffs = np.array([-0.0471627783238073,-0.0154369488516742, 0.000200148071496319, 0.000117138032146703,-0.0454659387045057],dtype=np.float32)

# NOTE(review): mutates img_points in place AFTER test 1 consumed it — the
# two tests are order-dependent; confirm this is intentional.
img_points *= sc


# RANSAC PnP: rvec/tvec map object-frame (local tile) points into the camera
# frame; `inners` holds the inlier indices.
ret,rvec,tvec,inners = cv2.solvePnPRansac(obj_points,img_points,cameraMatrix,distCoeffs,flags=cv2.SOLVEPNP_EPNP,reprojectionError=20)
# ret,rvec,tvec = cv2.solvePnP(obj_points,img_points,cameraMatrix,distCoeffs,flags=cv2.SOLVEPNP_EPNP)

# cv2.Rodrigues returns (rotation_matrix, jacobian); take the matrix.
R = cv2.Rodrigues(rvec)
r = R[0].copy()
# Camera position in the object frame: C = R^T (0 - t).
campos = np.array([[0,0,0]]).T
campos = np.dot(r.T,campos-tvec)
# pt = pt[:,0] + center

# Ground-truth camera coordinates are encoded in the filename as
# "@x@y@z@.jpg" — presumably in the same local frame as obj_points (TODO
# confirm); parse them and report the Euclidean position error.
image_path = "@168.03335140971467@326.89860444236547@336.0023833992891@.jpg"
pa_arr = image_path.split("@")
query_coords = np.array([pa_arr[-4],pa_arr[-3],pa_arr[-2]]).astype(float)
evec = query_coords- campos[:,0] 
error = math.sqrt(evec[0]*evec[0] +evec[1]*evec[1]+evec[2]*evec[2] )
print(error)


#@168.03335140971467@326.89860444236547@336.0023833992891@.jpg


# fp = open(outpath+str(imgid)+'.txt','w')
# if len(pos)>1:
#     print('拍摄位置:', image_name, file=fp)
#     print('计算位置:',pos,file=fp)
#     print('匹配点:', ground, file=fp)
#     print('摄影距离:',dis,file=fp)
# fp.close()
# query_index = imgid
# descriptors = features["descriptors"]
# keypoints = features["keypoints"]
# pred_point3ds = features["pred_point3ds"]
#
# image_name = features["image_name"]
#
# image_width = features["attrs"]["image_width"]
# image_height = features["attrs"]["image_height"]

######





