import cv2
import time
import numpy as np
import key_frame
from MDFlow_main.demo_md import flowtrack,mask_uv
# from RAFT.demo import flowtrack,line_track
import os
from sift import sift_points
# from light import sift_points,light_in

import  rotation

def point_affine(point, good0, good1, k):
    """Transfer a single point into the next frame via a k-NN weighted motion model.

    point : (2,) point in the first frame.
    good0, good1 : matched point arrays for frame 1 / frame 2.
    k : number of nearest neighbours to average over.
    Returns (matched_point, matched_x, matched_y).
    """
    nn0, nn1 = knn_point(point, good0, good1, k)
    matched = wigeht(nn0, nn1, point)
    return matched, matched[0], matched[1]

def wigeht(knn_points0, knn_points1, p0):
    """Estimate p0's new position as an inverse-distance-weighted average of
    the displacements of its nearest matched neighbours.

    knn_points0 : (k, 3) array — neighbour (x, y) in frame 1 plus distance to p0.
    knn_points1 : (k, 2+) array — matched neighbour coords in frame 2.
    p0 : (2,) point to transfer.
    Returns the transferred point as a (2,) ndarray.
    """
    anchor = np.array(p0)
    src = knn_points0[:, :2].T            # 2 x k coords in frame 1
    dst = knn_points1[:, :2].T            # 2 x k coords in frame 2
    displacement = dst - src              # per-neighbour motion vectors
    inv_dist = 1. / knn_points0[:, 2]     # closer neighbours weigh more
    weights = inv_dist / inv_dist.sum()   # normalise to sum to 1
    return anchor + displacement @ weights

def knn_point(point0, good0, good1, k):
    """Select the k matches whose frame-1 location is closest to point0.

    Returns (knn_points0, knn_points1); knn_points0 carries the Euclidean
    distance to point0 appended as an extra column.
    """
    diff = good0[:, :2] - point0[:2]
    dist = np.sqrt((diff ** 2).sum(axis=1))
    order = np.argsort(dist)[:k]          # indices of the k nearest matches
    with_dist = np.hstack([good0[order], dist[order, None]])
    return with_dist, good1[order]



# Generate a patch (bounding box) from the points drawn on the line
def box_center(points):
    """Return the centre (x, y) of the axis-aligned bounding box of `points`.

    points : (n, 2) array of drawn point coordinates.
    The centre is truncated to an integer pixel and returned as two floats.
    """
    xs, ys = points[:, 0], points[:, 1]
    x_lo, x_hi = np.min(xs), np.max(xs)
    y_lo, y_hi = np.min(ys), np.max(ys)
    # int() truncation matches the original pixel-snapping behaviour.
    cx = float(int(x_lo + (x_hi - x_lo) / 2))
    cy = float(int(y_lo + (y_hi - y_lo) / 2))
    return cx, cy

def boxin(box_center_x,box_center_y,l,img):# Build a square box of side 2*l, slid to stay inside the image
    """Return the corner/centre description of a square box of side 2*l.

    The box is nominally centred at (box_center_x, box_center_y); when it
    would cross an image border it is slid (not shrunk) so it stays inside.

    Returns an int ndarray of shape (6, 2):
        rows 0-3: top-left, top-right, bottom-left, bottom-right corners,
        row 4:    the (possibly shifted) box centre,
        row 5:    the box size [2*l, 2*l].

    NOTE(review): the boundary tests mix `<`/`<=` between the axes and the
    edge-pinned branches assume the image is at least 2*l pixels in each
    dimension — confirm with callers.
    """
    # box_center = [box_center_x, box_center_y]
    imgw = img.shape[1]-1  # largest valid x index
    imgh = img.shape[0]-1  # largest valid y index
    box_x1=[box_center_x-l,box_center_y-l]  # nominal top-left corner
    box_x4 = [box_center_x + l,box_center_y+l]  # nominal bottom-right corner

    if box_x1[1]<0 :  # box sticks out past the top edge
        if 0 <= box_x1[0] < box_x4[0] <= imgw:  # horizontally inside: pin to the top edge
            # print("bb9")
            box_x1 = [box_center_x - l, 0]
            box_x2 = [box_center_x + l, 0]
            box_x3 = [box_center_x - l, 2*l]
            box_x4 = [box_center_x + l, 2*l]
        elif box_x1[0]<0:  # also out on the left: pin to the top-left corner
            # print("bb8")
            # des_x=-box_x1[0]
            box_x1 = [0, 0]
            box_x2 = [2*l, 0]
            box_x3 = [0, 2*l]
            box_x4 = [2*l, 2*l]
        else:  # out on the right: pin to the top-right corner
            # print("bb7")
            box_x1 = [imgw-2*l, 0]
            box_x2 = [imgw, 0]
            box_x3 = [imgw-2*l, 2*l]
            box_x4 = [imgw,2*l]

    elif 0<=box_x1[1] <=box_x4[1]<imgh:  # vertically inside the image
        if box_x1[0]<=0:  # out on the left: pin to the left edge
            # print("bb6")
            box_x1 = [0,box_center_y-l]
            box_x2 = [2 * l,box_center_y-l]
            box_x3 = [0,box_center_y+l]
            box_x4 = [2 * l, box_center_y+l]
        elif box_x4[0]>=imgw:  # out on the right: pin to the right edge
            # print("bb5")
            box_x1 = [imgw-2*l, box_center_y - l]
            box_x2 = [imgw, box_center_y - l]
            box_x3 = [imgw-2*l, box_center_y + l]
            box_x4 = [imgw, box_center_y + l]
        else:  # fully inside: keep the nominal box, fill in the two missing corners
            # print("bb4")
            box_x2 = [box_center_x +l, box_center_y - l]
            box_x3 = [box_center_x - l, box_center_y + l]
    else:# box_x4[1] >= imgh: box sticks out past the bottom edge
        if box_x1[0]<=0:  # also out on the left: pin to the bottom-left corner
            # print("bb13")
            box_x1 = [0, imgh -2*l]
            box_x2 = [2*l,imgh -2*l]
            box_x3 = [0, imgh]
            box_x4 = [2*l, imgh]
        elif box_x4[0]>=imgw:  # out on the right: pin to the bottom-right corner
            # print("bb2")
            box_x1 = [imgw - 2 * l, imgh -2*l]
            box_x2 = [imgw, imgh -2*l]
            box_x3 = [imgw - 2 * l,imgh]
            box_x4 = [imgw , imgh]
        else:  # horizontally inside: pin to the bottom edge
            # print("bb1")
            box_x1 = [box_center_x - l, imgh-2*l]
            box_x2 = [box_center_x + l, imgh-2*l]
            box_x3 = [box_center_x - l, imgh]
            box_x4 = [box_center_x + l, imgh]

    box_centernew=[box_x1[0]+l,box_x1[1]+l]  # centre of the (possibly shifted) box
    # Rows: four corners, new centre, and the box size.
    box = np.array([box_x1, box_x2, box_x3, box_x4, box_centernew, [2 * l, 2 * l]])
    box = box.astype(int)
    return box

def line_box(img,box):
    """Draw the box's rectangle (top-left to bottom-right corner) on img in green."""
    top_left = (int(box[0, 0]), int(box[0, 1]))
    bottom_right = (int(box[3, 0]), int(box[3, 1]))
    cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), 2)
    return img

def H_point(H, point):
    """Map a 2-D point through homography H and round to integer pixel coords.

    H : (3, 3) homography matrix.
    point : (2,) point.
    Returns (point_xy, x, y) where point_xy is an int (2,) ndarray.
    """
    homogeneous = np.append(point, 1)        # lift to homogeneous coordinates
    mapped = np.dot(H, homogeneous)
    mapped = mapped / mapped[2]              # back to inhomogeneous coordinates
    xy = mapped[:2].astype(int)
    return xy, xy[0], xy[1]


# def box_Affine(simi_frame):
def box_Affine(simi_frame,img2, img2_index):# Region tracking; input is one candidate entry from simi_frame
    """Track the key frame's box region into img2 and advect its line points.

    simi_frame : candidate tuple (keyframe, match_count, good1, good2, ...)
                 as produced by traversal_kframe; simi_frame[0] is the key frame.
    img2 : current image (BGR array).
    img2_index : frame index (only used by the commented-out debug dumps).

    Returns (data_new, move_f, img_mask, rot): tracked line points, their mean
    displacement, the two patches concatenated side by side, and the mean
    corner rotation angle.
    """
    box1 = simi_frame[0].box
    l= box1[5,0]/2  # half side length (box row 5 stores [2l, 2l])
    # Estimate the homography from the matched SIFT points
    src= np.float32(simi_frame[2])
    dst = np.float32(simi_frame[3])

    H, mask= cv2.findHomography(src, dst, method=cv2.RANSAC, ransacReprojThreshold=2.0, maxIters=5000, confidence=0.9999)
    corr_number=sum(mask ==1)# RANSAC inlier count
    corr=corr_number/len(src)  # inlier ratio = reliability of the homography

    if corr>=0.9:  # homography is trustworthy: map the box corners through H
    # if corr >= 0:
        # time_H0 = time.time()
        box2_x1, box2_x1_x, box2_x1_y = H_point(H,box1[0, :])
        box2_x2, box2_x2_x, box2_x2_y = H_point(H,box1[1, :])
        box2_x3, box2_x3_x, box2_x3_y = H_point(H,box1[2, :])
        box2_x4, box2_x4_x, box2_x4_y = H_point(H,box1[3, :])
        box2_c, box2_c_x, box2_c_y = H_point(H, box1[4, :])
        # time_H1 = time.time()
        # time_H = time_H1 - time_H0
        # with open("/home/ltt/point_track/time_1015/time_H.txt", 'a') as fd:
        # cv2.circle(img2, (int(box2_x1_x), int(box2_x1_y)), 3, (0, 0, 255), -1)
        # cv2.circle(img2, (int(box2_x2_x), int(box2_x2_y)), 3, (0, 0, 255), -1)
        # cv2.circle(img2, (int(box2_x3_x), int(box2_x3_y)), 3, (0, 0, 255), -1)
        # cv2.circle(img2, (int(box2_x4_x), int(box2_x4_y)), 3, (0, 0, 255), -1)
    else:  # too few inliers: fall back to k-NN weighted point transfer
        # print('KNNNNNNNNNN__________________________________________',corr)
        # time_K0 = time.time()
        box2_x1, box2_x1_x, box2_x1_y = point_affine(box1[0, :],src,dst,4)
        box2_x2, box2_x2_x, box2_x2_y = point_affine(box1[1, :],src,dst,4)
        box2_x3, box2_x3_x, box2_x3_y = point_affine(box1[2, :],src,dst,4)
        box2_x4, box2_x4_x, box2_x4_y = point_affine(box1[3, :],src,dst,4)
        box2_c, box2_c_x, box2_c_y = point_affine(box1[4, :], src, dst, 4)
        # time_K1 = time.time()
        # time_K = time_K1 - time_K0
        # with open("/home/ltt/point_track/time_1015/time_K.txt", 'a') as fd:
        #     fd.write(str(time_K) + '\n')
    #     cv2.circle(img2, (int(box2_x1_x), int(box2_x1_y)), 3, (0, 0, 255), -1)
    #     cv2.circle(img2, (int(box2_x2_x), int(box2_x2_y)), 3, (0, 0, 255), -1)
    #     cv2.circle(img2, (int(box2_x3_x), int(box2_x3_y)), 3, (0, 0, 255), -1)
    #     cv2.circle(img2, (int(box2_x4_x), int(box2_x4_y)), 3, (0, 0, 255), -1)
    #
    # cv2.imwrite(os.path.join('/home/ltt/point_track/rot_img/mask_box/{}.jpg'.format( img2_index)), img2)

    # Rotation of each corner about the box centre, averaged over all four.
    rot0=rotation.angle_between_vectors(box1[0, :]-box1[4, :], box2_x1-box2_c)
    rot1 = rotation.angle_between_vectors(box1[1, :] - box1[4, :], box2_x2 - box2_c)
    rot2 = rotation.angle_between_vectors(box1[2, :] - box1[4, :], box2_x3 - box2_c)
    rot3 = rotation.angle_between_vectors(box1[3, :] - box1[4, :], box2_x4 - box2_c)

    rot=(rot0+rot1+rot2+rot3)/4
    # print("rotation angle:", rot)

    # Crop the key frame's box patch.
    img1_mask = simi_frame[0].imgrgb[box1[0, 1]:box1[2, 1] + 1, box1[0, 0]:box1[1, 0] + 1, :]

    if rot<=100:  # small rotation: track directly in img2 (threshold units per rotation module — confirm)
        # Axis-aligned hull of the four mapped corners
        y_min=min(box2_x1_y,box2_x2_y)
        y_max=max(box2_x3_y,box2_x4_y)
        x_min=min(box2_x1_x,box2_x3_x)
        x_max=max(box2_x2_x,box2_x4_x)

        box2_center_x=int((x_max+x_min)/2)
        box2_center_y=int((y_max+y_min)/2)

        box2_l=l
        box2_int=boxin(box2_center_x, box2_center_y, box2_l, simi_frame[0].imgrgb)
        img2_mask = img2[box2_int[0, 1]:box2_int[2, 1] + 1, box2_int[0, 0]:box2_int[1, 0] + 1, :]

        # Optical flow between the two patches, then advect the line points.
        u_f, v_f, _,_ = flowtrack(img1_mask, img2_mask)
        # occ_fw[:] = [0, 0, 0]
        # occ_bw[:] = [0, 0, 0]

        data_new, move_f,linepoint1_new,linepoint2_new=box_line_track(u_f, v_f, simi_frame[0], box2_int)
        # _,_,data_new=mask_uv(linepoint1_new,linepoint2_new,data2, occ_fw, occ_bw)
        img_mask = np.concatenate((img1_mask, img2_mask), axis=1)

    else:# large rotation: warp img2 with H and track in the warped image
        center =  box1[4, :].reshape(2,1)# 2x1 column vector
        print(center)
        img3 = cv2.warpPerspective(img2, H, (img2.shape[1], img2.shape[0]))  # warp image B onto image A
        center_p3 = np.dot(H, np.vstack((center + 0.5,1)))  # align pixel centres: pixel coords refer to the pixel corner; +0.5 moves them to the centre
        center_p3 = center_p3[0:2, :] / center_p3[2, :] - 0.5
        box_3 = boxin(center_p3[0], center_p3[1], l, img3)
        img3_mask = img3[box_3[0, 1]:box_3[2, 1] + 1, box_3[0, 0]:box_3[1, 0] + 1, :]

        u_f, v_f,_,_ = flowtrack(img1_mask, img3_mask)
        # occ_fw[:]=[0,0,0]
        # occ_bw[:]=[0,0,0]
        data_new, _ ,_,_= box_line_track(u_f, v_f, simi_frame[0], box_3)

        img_box = key_frame.line_trackpoint(img3, data_new)
        # cv2.imwrite(os.path.join('/home/ltt/point_track/rot_img/1203/img3_1124/{}.jpg'.format(img2_index)), img_box)

        # Map the points tracked in warped space back to img2 via the inverse homography.
        data_new_=data_new.T

        data_new = np.dot(np.linalg.inv(H), np.vstack((data_new_, np.ones((1,data_new_.shape[1])))))
        data_new= data_new[0:2, :] / data_new[2, :]
        data_new=data_new.T

        move_f = np.mean(data_new - simi_frame[0].linepoint, axis=0)
        img_mask = np.concatenate((img1_mask, img3_mask), axis=1)
    return data_new,move_f,img_mask,rot


def box_uv(img1, img2, box2_int):
    """Crop the key-frame box from img1 and box2_int from img2, then run optical flow.

    img1 : key-frame object exposing .box and .imgrgb.
    img2 : current image array.
    box2_int : box array (same layout as produced by boxin) for img2.
    Returns (u_f, v_f, img_mask): forward-flow components and the two crops
    concatenated side by side.

    NOTE(review): the `global` declaration publishes both crops as
    module-level variables — presumably some caller reads them; confirm
    before removing.
    """
    global img1_mask, img2_mask
    box1_int = img1.box.astype(int)
    img1_mask = img1.imgrgb[box1_int[0, 1]:box1_int[2, 1] + 1, box1_int[0, 0]:box1_int[1, 0] + 1, :]
    img2_mask = img2[box2_int[0, 1]:box2_int[2, 1] + 1, box2_int[0, 0]:box2_int[1, 0] + 1, :]
    img_mask = np.concatenate((img1_mask, img2_mask), axis=1)
    u_f, v_f, _,_ = flowtrack(img1_mask, img2_mask)
    return u_f, v_f,img_mask


def traversal_kframe(kframe, img_kp, img_des, k):
    """Match the current frame's SIFT features against every key frame.

    kframe : list of key-frame objects exposing .kp/.des and .kpall/.desall.
    img_kp, img_des : keypoints/descriptors of the current frame.
    k : keep at most this many candidates.

    Returns three lists of tuples
    (keyframe, match_count, good1, good2, mean_motion, good1_all, good2_all):
    the top-k candidates by match count, those with >= 8 matches, and the rest.
    """
    candidates = []
    for frame in kframe:
        g1, g2 = sift_points(frame.kp, frame.des, img_kp, img_des)
        # g1, g2 = light_in(frame.box, g1, g2)
        g1_all, g2_all = sift_points(frame.kpall, frame.desall, img_kp, img_des)

        if len(g1):
            mean_move = np.mean(g2 - g1, axis=0)
            candidates.append((frame, len(g1), g1, g2, mean_move, g1_all, g2_all))

    candidates.sort(key=lambda c: c[1], reverse=True)
    simi_frame, simi_frame_best, simi_frame_nobest = [], [], []
    if candidates:
        simi_frame = candidates[:min(len(candidates), k)]
        for cand in simi_frame:
            # Candidates with at least 8 matches are considered reliable.
            (simi_frame_best if cand[1] >= 8 else simi_frame_nobest).append(cand)
    return simi_frame, simi_frame_best, simi_frame_nobest

def box_line_track(u_f, v_f, kf, box2):
    """Advect the key frame's line points through a flow field computed on patches.

    u_f, v_f : flow components defined on the key frame's box patch.
    kf : key frame exposing .box and .linepoint (image coordinates).
    box2 : box of the matching patch in the new frame (same layout as kf.box).

    Returns (data_new, move_f, linepoint1_new, linepoint2_new): the tracked
    points in image coordinates, their mean displacement, and the patch-local
    coordinates before/after advection (indices into the patch masks).
    """
    origin1 = kf.box[0, :2]                 # top-left corner of the key-frame patch
    linepoint1_new = kf.linepoint - origin1  # points in patch-local coordinates
    # Sample the flow at each point's (truncated) pixel location.
    cols_rows = linepoint1_new.astype(int)
    du = u_f[cols_rows[:, 1], cols_rows[:, 0]]
    dv = v_f[cols_rows[:, 1], cols_rows[:, 0]]
    linepoint2_new = linepoint1_new + np.stack((du, dv), axis=1)
    # Back to image coordinates via the new patch's top-left corner.
    data_new = linepoint2_new + box2[0, :2]
    move_f = np.mean(data_new - kf.linepoint, axis=0)
    return data_new, move_f, linepoint1_new, linepoint2_new

def keyframe_affine(img2, simi_frame, img2_index, scale_threshold):
    """Track the drawn points from the best-matching key frame into img2.

    img2 : current image array.
    simi_frame : candidate tuple from traversal_kframe (simi_frame[0] is the
                 key frame, simi_frame[4] its mean SIFT motion).
    img2_index : current frame index (forwarded to box_Affine).
    scale_threshold : unused here; kept for interface compatibility.

    Returns (data_all, data_scale, img_mask, rot): tracked points kept inside
    the image, their extents (x_min, x_max, y_min, y_max), the side-by-side
    patch image, and the estimated rotation angle.
    """
    kf = simi_frame[0]  # best-matching key frame plus its match data
    data_new, move_f, img_mask, rot = box_Affine(kf, img2, img2_index)
    move_sift = simi_frame[0][4]
    # Discard the result when flow-based and SIFT-based motion disagree badly.
    if np.linalg.norm(move_f - move_sift) >= 35:
        data_new = []
    if len(data_new) != 0:
        in_x = (data_new[:, 0] >= 0) & (data_new[:, 0] < img2.shape[1])
        in_y = (data_new[:, 1] >= 0) & (data_new[:, 1] < img2.shape[0])
        data_all = data_new[in_x & in_y]
    else:
        data_all = np.array([])
    if len(data_all) != 0:
        data_scale = (np.min(data_all[:, 0]), np.max(data_all[:, 0]),
                      np.min(data_all[:, 1]), np.max(data_all[:, 1]))
    else:
        data_scale = (0, 0, 0, 0)
    return data_all, data_scale, img_mask, rot

if __name__ == '__main__':
    # Load hand-drawn (x, y) points from a whitespace-separated text file.
    t_data = np.genfromtxt("/home/ltt/point_track/draw_point/1.txt", dtype=['float64', 'float64'], delimiter='')
    data = t_data.view(np.float64).reshape(-1, 2)
    path="/home/ltt/point_track/huaxi/image01_z/"
    dirs_init1 = os.listdir(path)  # frames from camera 1
    dirs_init1.sort()