import argparse
import csv
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
DEVICE = 'cuda'
import cv2
from pylab import *
from sklearn.ensemble import IsolationForest
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
from scipy.interpolate import splprep, splev


# Camera intrinsic matrices [[fx, 0, cx], [0, fy, cy], [0, 0, 1]] for
# cameras 1-4 (calibration output; k3 is currently unused in this file).
k1=np.array([[1.17404725e+03, 0.00000000e+00, 1.04039839e+03],
 [0.00000000e+00, 1.17434057e+03, 5.57015924e+02],
[0.00000000e+00,0.00000000e+00,1.00000000e+00]])
k2=np.array([[1.16024513e+03 ,0.00000000e+00 ,9.87355781e+02],
 [0.00000000e+00, 1.16124660e+03 ,5.48249006e+02],
 [0.00000000e+00 ,0.00000000e+00, 1.00000000e+00]])
k3=np.array([[1.15592821e+03, 0.00000000e+00, 1.00085670e+03],
 [0.00000000e+00 ,1.15823803e+03 ,5.13443338e+02],
[0.00000000e+00,0.00000000e+00,1.00000000e+00]])
k4=np.array([[1.15187875e+03, 0.00000000e+00, 9.95912661e+02],
 [0.00000000e+00 ,1.15536827e+03, 5.33342980e+02],
[0.00000000e+00,0.00000000e+00,1.00000000e+00]])
# Stereo extrinsics used by pre_dis: R1_2/t1_2 rotate+translate camera-1
# coordinates into camera 2's frame, R1_4/t1_4 into camera 4's frame.
# Translation magnitudes suggest millimetres — TODO confirm units against
# the calibration procedure.
R1_2=np.array([[ 9.99918556e-01, -9.33371583e-03 , 8.70421976e-03],
 [ 9.33520452e-03  ,9.99956418e-01, -1.30416544e-04],
 [-8.70262313e-03 ,2.11661594e-04 , 9.99962109e-01]])
R1_4=np.array([[ 0.99978703, -0.02023303,  0.00406459],
 [ 0.02022541,  0.99979363,  0.00190645],
 [-0.00410232, -0.00182383,  0.99998992]])
t1_2=np.array([[ 79.26012539],
 [ -1.69524594],
 [-10.95947029]])
t1_4=np.array([[204.45857283],
 [ -1.53844957],
 [-12.4479269 ]])

def parse_args():
    parser = argparse.ArgumentParser(description='track Demo')
    parser.add_argument('--images1', dest='images1', default='./pic_7_11/1', help='video1 path')
    parser.add_argument('--images2', dest='images2', default='./pic_7_11/4', help='video2 path')
    parser.add_argument('--images_pre', dest='images_pre', default='./pic_7_11/2', help='videopre path')
    parser.add_argument('--track_point0',dest = 'track_point0', default='./draw_point/1.txt', help='track_point path')
    args = parser.parse_args()
    return args


def make_homog(points):
    """Convert a 2 x n array of points into 3 x n homogeneous coordinates.

    A row of ones is appended so callers can apply 3x3 projective maps.
    Uses explicit np.* calls instead of relying on the bare `vstack`/`ones`
    names that `from pylab import *` injects into the module namespace.
    """
    return np.vstack((points, np.ones((1, points.shape[1]))))

# find chessboard corners
def find_corners(img):
    """Detect the 6x4 chessboard corners of a BGR image.

    Returns:
        corner: the cv2.KeyPoint list produced by sift.compute on the
                refined corner positions
        a:      2 x 24 array of corner pixel coordinates
        b:      3 x 24 homogeneous version of `a`
    Raises:
        ValueError if no 6x4 chessboard is found.  (The original fell
        through to `return corner, ...` with `corner` unbound, producing
        an UnboundLocalError.)
    """
    sift = cv2.SIFT_create()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners_ = cv2.findChessboardCorners(gray, (6, 4), None)
    if not ret:
        raise ValueError("no 6x4 chessboard pattern found in image")
    # refine corner positions to sub-pixel accuracy
    corners = cv2.cornerSubPix(gray, corners_, (13, 13), (-1, -1), criteria)
    a_ = np.reshape(corners, (-1, 2))  # 24 x 2 pixel coordinates
    a = a_.T
    b = make_homog(a)
    cv_kpts1 = [cv2.KeyPoint(a_[i][0], a_[i][1], 1)
                for i in range(a_.shape[0])]
    # SIFT descriptors computed at the corner locations
    corner, corner_des = sift.compute(gray, cv_kpts1)
    return corner, a, b

def get_groundTruth(path):
    """Load ground-truth 2-D points from a CSV file.

    The first row is treated as a header and skipped; each remaining row
    contributes its first two columns as an [x, y] float pair.
    """
    with open(path, 'r') as fh:
        rows = csv.reader(fh)
        next(rows)  # drop the header line
        points = []
        for row in rows:
            points.append([float(row[0]), float(row[1])])
        return points

# Lucas-Kanade optical flow
def lk_points(image1,image2):
    """Detect Shi-Tomasi corners in image1 and track them into image2
    with pyramidal Lucas-Kanade optical flow.

    Returns (good0, good1): the successfully tracked point coordinates in
    image1 and image2 respectively.
    """
    corner_cfg = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)
    flow_cfg = dict(winSize=(15, 15),
                    maxLevel=2,
                    criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # never read afterwards, but kept so the global NumPy RNG advances
    # exactly as before this rewrite
    color = np.random.randint(0, 255, (100, 3))

    gray0 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    seeds = cv2.goodFeaturesToTrack(gray0, mask=None, **corner_cfg)

    gray1 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    tracked, status, _err = cv2.calcOpticalFlowPyrLK(gray0, gray1, seeds, None, **flow_cfg)

    # keep only the points whose status flag reports a successful track
    good1 = tracked[status == 1]
    good0 = seeds[status == 1]
    return good0, good1

def sift_points(image1,image2,k=0.75):#k is the ratio-test threshold
    """SIFT-match two images and filter matches with Lowe's ratio test.

    Parameters:
        image1, image2: input images.
        k: ratio-test threshold (smaller = stricter).  Defaults to 0.75
           (Lowe's recommended value) because binocular_matching and
           suc_track call sift_points(image1, image2) with no threshold,
           which previously raised TypeError.

    Returns:
        (good0, good1): matched pixel coordinates as (n, 2) arrays,
        good0[i] in image1 corresponding to good1[i] in image2.
    """
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(image1, None)
    kp2, des2 = sift.detectAndCompute(image2, None)

    bf = cv2.BFMatcher()
    # two nearest descriptor neighbours per query keypoint
    matches = bf.knnMatch(des1, des2, k=2)

    pt1_list = []
    pt2_list = []
    goodmatch = []
    for m1, m2 in matches:
        # Euclidean distance between descriptors: smaller = better match.
        if m1.distance < k * m2.distance:
            if m1 not in goodmatch:  # drop duplicate matches
                goodmatch.append(m1)
                pt1 = kp1[m1.queryIdx].pt  # queryIdx indexes keypoints of image1
                pt2 = kp2[m1.trainIdx].pt  # trainIdx indexes keypoints of image2
                if pt1 not in pt1_list:  # drop duplicate coordinates
                    pt1_list.append(pt1)
                    pt2_list.append(pt2)
    good0 = np.array(pt1_list)  # matched coordinates in image1
    good1 = np.array(pt2_list)  # matched coordinates in image2
    return good0, good1

# filter matched point pairs with an Isolation Forest
def isolation(good0,good1):
    """Remove outlier matches by running an Isolation Forest on the slope
    of each match's displacement vector.

    Parameters:
        good0, good1: corresponding (n, 2) point arrays from two images.
    Returns:
        (good0_new, good1_new): the inlier subsets, both as np.ndarray.
        (The original converted good0_new twice and returned good1_new
        as a plain Python list — fixed here.)
    """
    slops = []
    for p0, p1 in zip(good0, good1):
        dx = p1[0] - p0[0]
        dy = p1[1] - p0[1]
        # vertical matches get a huge finite slope instead of div-by-zero
        slops.append(dy / dx if dx != 0 else dy / (10 ** -20))

    # n_estimators: number of trees; max_samples: samples per tree;
    # contamination: expected outlier ratio; max_features: features per tree
    treeMode = IsolationForest(n_estimators=50, max_samples='auto',
                               contamination='auto', max_features=1)
    slops_arr = np.array(slops).reshape(-1, 1)
    treeMode.fit(slops_arr)
    mylabel = treeMode.predict(slops_arr)  # +1 = inlier, -1 = outlier

    good_index = [i for i, v in enumerate(mylabel) if v == 1]

    good0_new = np.array([good0[i] for i in good_index])
    good1_new = np.array([good1[i] for i in good_index])
    return good0_new, good1_new

#knn
def knn_point(point0,good0,good1,k):
    """Find the k matched points of good0 nearest to point0.

    Parameters:
        point0: (x, y) query point.
        good0, good1: corresponding matched point arrays ((n, 2)).
        k: number of neighbours to keep.
    Returns:
        knn_points0: (k, 4) array of (x, y, distance-to-point0, index) rows
        knn_points1: (k, 3) array of (x, y, index) rows taken from good1
    Fixes vs the original:
      * candidates sharing only ONE coordinate with point0 were wrongly
        skipped (`and` instead of `or` in the inequality test); now only
        the exact query point is excluded;
      * `math.sqrt` was used although `math` is never imported here —
        replaced by np.hypot;
      * the builtin name `max` is no longer shadowed.
    """
    candidates = []
    for i in range(good0.shape[0]):
        x, y = good0[i][0], good0[i][1]
        if x == point0[0] and y == point0[1]:
            continue  # skip only the query point itself
        dis = float(np.hypot(x - point0[0], y - point0[1]))
        candidates.append((x, y, dis, i))
    # stable sort keeps the earlier point on equal distances, matching the
    # original "replace only on strictly smaller distance" policy
    candidates.sort(key=lambda c: c[2])
    knn_points0 = np.array(candidates[:k])  # (x, y, distance, index)
    knn_point1 = []
    for row in knn_points0:
        idx = int(row[3])
        knn_point1.append((good1[idx][0], good1[idx][1], idx))  # (x, y, index)
    knn_points1 = np.array(knn_point1)
    return knn_points0, knn_points1

# Represent a point by least-squares weights over its k nearest neighbours
def point_weight(point,knn_point):
    # NOTE(review): this function is never called in this file (the only
    # call site, in point_affine, is commented out) and looks broken:
    # `a` is 1-D, so np.dot(a.T, a) yields a 0-d scalar and
    # np.linalg.inv raises on it.  Possibly np.column_stack((x, y)) was
    # intended instead of concatenate — confirm before reviving this code.
    x=np.array(knn_point[:,0]).T
    y=np.array(knn_point[:,1]).T
    a=np.concatenate((x, y), axis=0)
    c=np.dot(a.T,a)
    t=np.dot(np.dot(np.linalg.inv(c),a.T),np.array(point).T)
    t=t/np.linalg.norm(t)# normalise by the L2 norm
    return t# goal: minimise the L2 norm of a point pair


# express a point through its knn matches
def wigeht(knn_points0,knn_points1,p0):
    """Predict the matched location of p0 as p0 plus the inverse-distance
    weighted average of its neighbours' displacement vectors.

    knn_points0 rows: (x, y, distance-to-p0, index);
    knn_points1 rows: (x, y, index) — the matched positions.
    """
    p0 = np.array(p0)
    src = (knn_points0[:, :2]).T        # 2 x k neighbour coords, image 0
    dst = (knn_points1[:, :2]).T        # 2 x k matched coords, image 1
    motion = dst - src                  # per-neighbour displacement
    inv_dist = 1. / knn_points0[:, 2]   # closer neighbours weigh more
    weights = inv_dist / inv_dist.sum()
    return p0 + np.dot(motion, weights)

# Given intrinsics/extrinsics and one matched pair p1 (camera 1) / p4
# (camera 4), predict where the 3-D point projects in camera 2.
def pre_dis(k1,k2,k4,t1_2,t1_4,R1_4,R1_2,p1,p4):
    """Triangulate a matched homogeneous pixel pair and reproject it.

    p1 and p4 are 3x1 homogeneous pixel points in cameras 1 and 4.
    Returns the homogeneous 3x1 predicted pixel position in camera 2.
    """
    ray = np.dot(np.linalg.inv(k1), p1)    # back-projected ray (xw/zw, yw/zw, 1)
    rot_ray = np.dot(R1_4, ray)            # ray in camera-4 orientation, 3x1
    depth_coef = np.dot(R1_4[2, :], ray)   # its z component, 1x1
    proj = np.dot(k4, rot_ray)
    lhs = np.dot(k4, t1_4) - t1_4[2] * p4
    rhs = depth_coef * p4 - proj
    zw_ = np.array([lhs[0] / rhs[0], lhs[1] / rhs[1]])

    # Either row solves for the depth; empirically zw_[0] (the x row)
    # gives the best result, so it is used here.
    zw = zw_[0]

    pw = zw * ray                                       # 3-D point, camera-1 frame
    zc_2 = zw * np.dot(R1_2[2, :], ray) + t1_2[2]       # depth in camera-2 frame
    pre2 = np.dot(k2, (np.dot(R1_2, pw) + t1_2)) / zc_2
    return pre2

#def binocular_matching (args,tracked_points):
def binocular_matching (args,data):# stereo matching: map the tracked points into camera 2
    """Map the points in `data` (n x 2 pixel coords in camera 1) to camera 2.

    Using only the first frame of each image directory, this:
      1. SIFT-matches camera 1 against camera 4 and filters outliers;
      2. for every tracked point, estimates its camera-4 position from the
         displacements of its 4 nearest matched neighbours (wigeht);
      3. triangulates with the calibrated intrinsics/extrinsics (pre_dis)
         to predict the pixel position in camera 2.
    Shows the predictions on the camera-2 frame (blocking imshow) and
    returns them as an (n, 2) array.
    """
    dirs_init1 = os.listdir(args.images1)  # camera 1 frames
    dirs_init1.sort()
    # dirs_init1=dirs_init1[:100]

    dirs_init2 = os.listdir(args.images2)  # camera 4 frames
    dirs_init2.sort()
    # dirs_init2 = dirs_init2[:100]

    dirs_init_pre = os.listdir(args.images_pre)  # camera 2 / "human eye" view
    dirs_init_pre.sort()

    # data=args.track_point0
    points0= data

    # only the first frame of each sequence is processed
    i=0
    image0 = cv2.imread(args.images1 + '/{}'.format(dirs_init1[i]), flags=1)
    image1 = cv2.imread(args.images2 + '/{}'.format(dirs_init2[i]), flags=1)
    image_pre = cv2.imread(args.images_pre + '/{}'.format(dirs_init_pre[i]), flags=1)

    # NOTE(review): sift_points declares a ratio-threshold parameter —
    # confirm this two-argument call is valid (it needs a default value).
    good0, good1 = sift_points(image0, image1)
    good0_new,good1_new=isolation(good0, good1)# inlier matches between cameras 1 and 4

    # visualize the matched pairs
    # img = np.concatenate((image0, image1), axis=1)
    # for i in range(len(good0_new)):
    #     if i%10==0:
    #         cv2.line(img, (int(good0_new[i][0]), int(good0_new[i][1])), (int(good1_new[i][0]+ 1920), int(good1_new[i][1])),(0, 0, 255), 3)
    #     # cv2.circ/, int(good0_new[i][1])),3, (0, 0, 255), -1)
    # cv2.namedWindow("img0", cv2.WINDOW_NORMAL)
    # cv2.imshow("img0", img)
    # cv2.waitKey(0)

    # points_0 = points0.T
    c = points0.shape
    track_points = []  # collects every mapped point
    for i in range(c[0]):  # map each tracked coordinate
        point_0 = points0[i]
        cv2.circle(image0, (int(point_0[0]), int(point_0[1])), 2, (0, 255, 0), -1)
        knn_points0, knn_points1 = knn_point(point_0, good0_new, good1_new, 4)
        pre_point1 = wigeht(knn_points0, knn_points1, point_0)  # stereo match cameras 1 and 4 first

        # cv2.circle(image1, (int(pre_point1[0]), int(pre_point1[1])), 2, (0, 255, 0), -1)
        # cv2.namedWindow("img1", cv2.WINDOW_NORMAL)
        # cv2.imshow("img1", image0)
        # cv2.waitKey(0)
        # knn_points0=np.array(knn_points0)

        # visualize the knn points
        # cv2.circle(image0, (int(point_0[0]), int(point_0[1])), 3, (255, 0,0), -1)# the point being tracked
        # for j in range(knn_points0.shape[0]):
        #     cv2.circle(image0, (int(knn_points0[j][0]), int(knn_points0[j][1])), 3, (0, 255, 0), -1)
        # cv2.namedWindow("img0", cv2.WINDOW_NORMAL)
        # cv2.imshow("img0", image0)

        # project onto camera 2
        point0 = make_homog(point_0.reshape(2, 1))
        pre_point1 = make_homog(pre_point1.reshape(2, 1))

        pre_pre_point2 = pre_dis(k1, k2, k4, t1_2, t1_4, R1_4, R1_2, point0, pre_point1)
        pre_pre_point2=(pre_pre_point2.T)[:,:2]
        track_points.append(pre_pre_point2)  # the final points to track

        # visualize the tracked points
        cv2.circle(image_pre, (int(pre_pre_point2[0][0]), int(pre_pre_point2[0][1])), 2, (0, 255, 0), -1)
    cv2.namedWindow("image_pre", cv2.WINDOW_NORMAL)
    cv2.imshow("image_pre", image_pre)
    cv2.waitKey(0)
    track_points = np.array(track_points).reshape(-1,2)
    # track_points=lof(track_points)
    return track_points

def point_affine(point,good0,good1,k):
    """Map `point` through its k nearest matched neighbours (knn_point +
    wigeht) and return the result with coordinates truncated to integers.

    Returns:
        ([x_int, y_int], x_float, y_float) where the float values are the
        truncated ints cast back to float.
    """
    neigh0, neigh1 = knn_point(point, good0, good1, k)
    mapped = wigeht(neigh0, neigh1, point)
    # w=point_weight(point, knn_point)
    x_int = mapped[0].astype(int)
    y_int = mapped[1].astype(int)
    # NOTE(review): the pair holds truncated ints while the two extra
    # return values are those same ints re-cast to float — confirm that
    # the float values were not meant to keep the sub-pixel precision.
    return [x_int, y_int], x_int.astype(float), y_int.astype(float)


# track points through consecutive frames
def suc_track(args,track_points):
    """Track `track_points` through every consecutive frame pair in
    args.images_pre, displaying each tracked frame.

    Parameters:
        args: namespace with an `images_pre` directory of frames.
        track_points: (n, 2) initial point positions in the first frame.
    Returns:
        (n, 2) array with the positions in the final frame (the input
        positions if the directory holds fewer than two frames).

    Fixes vs the original:
      * the inner loop iterated range(len(track_points[0])) — i.e. 2, the
        coordinate count of ONE point — instead of the number of points;
      * positions were never propagated between frames, although matches
        are computed between frame i and i+1, so every later frame was
        wrongly matched against the frame-0 positions;
      * sift_points was called without its required ratio threshold
        (TypeError); 0.75 (Lowe's standard value) is passed explicitly.
    """
    dirs_init = os.listdir(args.images_pre)  # frame file names
    dirs_init.sort()
    points = np.asarray(track_points, dtype=float)
    for i in range(len(dirs_init)-1):
        image0 = cv2.imread(args.images_pre + '/{}'.format(dirs_init[i]), flags=1)
        image1 = cv2.imread(args.images_pre + '/{}'.format(dirs_init[i+1]), flags=1)
        good0, good1 = sift_points(image0, image1, 0.75)
        good0_new, good1_new = isolation(good0, good1)

        pre_point = []
        for j in range(len(points)):  # track each point independently
            point0 = points[j]
            knn_points0, knn_points1 = knn_point(point0, good0_new, good1_new, 4)
            pre_point1 = wigeht(knn_points0, knn_points1, point0)
            pre_point.append(pre_point1)
            cv2.circle(image1, (int(pre_point1[0]), int(pre_point1[1])), 3, (0, 255, 0), -1)
        points = np.array(pre_point)  # propagate: the next pair starts from here
        cv2.namedWindow("0", cv2.WINDOW_NORMAL)
        cv2.imshow("0", image1)
        cv2.waitKey(1)
    return points

if __name__ == '__main__':
    # Alternative workflow (disabled): parse CLI args, load the points to
    # track from disk, and run the full stereo mapping:
    # args = parse_args()
    # t_data=np.genfromtxt('./draw_point/1.txt', dtype=['float64', 'float64'], delimiter='')
    # data=t_data.view(np.float64).reshape(-1,2)
    # track_points=binocular_matching(args,data)

    # Demo: SIFT-match two frames and draw the correspondences side by side.
    img1 = cv2.imread("/home/ltt/point_track/huaxi/image01_z/000{}.jpg".format(35), 1)
    img2 = cv2.imread('/home/ltt/point_track/huaxi/image01_z/000{}.jpg'.format(55), 1)
    good1, good2 = sift_points(img1, img2, 0.4)

    canvas = np.concatenate((img1, img2), axis=1)
    good1 = good1.astype(int)
    good2 = good2.astype(int)
    offset = img1.shape[1]  # shift right-image x-coords past the left image
    for idx in range(len(good1)):
        cv2.line(canvas, (good1[idx, 0], good1[idx, 1]),
                 (good2[idx, 0] + offset, good2[idx, 1]), color=(0, 255, 0))
    cv2.namedWindow("0", cv2.WINDOW_NORMAL)
    cv2.imshow("0", canvas)
    cv2.waitKey(0)
    a = 1  # breakpoint anchor



