import os
# os.environ['CUDA_VISIBLE_DEVICES'] = " "
import numpy as np
import cv2
import torch
import torch.nn.functional as F
from MDFlow_main.models.FastFlowNet import FastFlowNet
from MDFlow_main.utils import read, write, centralize, flow_to_color, get_occ_mask
import time
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import yaml

# Scale factor applied to the raw network output to recover pixel-space flow
# (FastFlowNet, like PWC-Net, predicts flow divided by this constant).
div_flow = 20.0
# Network input height/width are padded up to a multiple of this value
# before inference (presumably required by the model's pyramid — TODO confirm).
div_size = 64
# Inference-only model instance; requires a CUDA device at import time.
model = FastFlowNet().cuda().eval()
# Alternative pretrained checkpoints (kept for reference):
# model.load_state_dict(torch.load('./checkpoints/fastflownet_chairs.pth'))
# model.load_state_dict(torch.load("/home/ltt/point_track_final/MDFlow_main/checkpoints/fastflownet_gtav.pth"))
# model.load_state_dict(torch.load('./checkpoints/fastflownet_sintel.pth'))
# model.load_state_dict(torch.load('./checkpoints/fastflownet_kitti.pth'))

# Resolve the checkpoint relative to this file so the script works from any CWD.
current_dir = os.path.dirname(os.path.abspath(__file__))
config_path=os.path.abspath(os.path.join(current_dir, './checkpoints/fastflownet_gtav.pth'))
model.load_state_dict(torch.load(config_path))

def sift_points(image1, image2, k):
    """Match SIFT keypoints between two images using Lowe's ratio test.

    Parameters
    ----------
    image1, image2 : np.ndarray
        Input images as accepted by cv2.SIFT.detectAndCompute.
    k : float
        Ratio-test threshold: a candidate match (m1, m2) is kept when
        m1.distance < k * m2.distance (smaller distance = better match,
        smaller k = stricter filtering).

    Returns
    -------
    (good0, good1) : tuple of np.ndarray
        Matched point coordinates in image1 and image2 respectively,
        aligned row-for-row.  Both empty when nothing can be matched.
    """
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(image1, None)
    kp2, des2 = sift.detectAndCompute(image2, None)

    # detectAndCompute returns des=None when no keypoints are found, and
    # knnMatch(k=2) needs at least two descriptors on each side; the
    # original code crashed in either case.
    if des1 is None or des2 is None or len(des1) < 2 or len(des2) < 2:
        return np.array([]), np.array([])

    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    pt1_list = []
    pt2_list = []
    seen_pts = set()  # O(1) duplicate check instead of `pt1 in pt1_list`
    for pair in matches:
        if len(pair) < 2:  # knnMatch may return fewer than 2 neighbours
            continue
        m1, m2 = pair
        if m1.distance < k * m2.distance:  # Lowe's ratio test
            # queryIdx indexes kp1 (query image = image1),
            # trainIdx indexes kp2 (train image = image2).
            pt1 = kp1[m1.queryIdx].pt
            pt2 = kp2[m1.trainIdx].pt
            if pt1 not in seen_pts:  # drop duplicate source points
                seen_pts.add(pt1)
                pt1_list.append(pt1)
                pt2_list.append(pt2)
    good0 = np.array(pt1_list)  # matched coordinates in image1
    good1 = np.array(pt2_list)  # matched coordinates in image2
    return good0, good1

def flowtrack(img1_np,img2_np):
    """Run FastFlowNet on an image pair and return per-pixel flow components.

    Parameters
    ----------
    img1_np, img2_np : np.ndarray
        HxWx3 uint8 frames (values 0-255).  If H or W is not a multiple of
        the module-level `div_size`, both frames are bilinearly resized up
        to the nearest multiple before inference.

    Returns
    -------
    (u_f, v_f, 1, 1)
        u_f / v_f come from flow_to_color(flow_fw) — presumably the
        horizontal and vertical forward-flow fields at original resolution;
        confirm against MDFlow_main.utils.flow_to_color.  The trailing 1, 1
        are placeholders: the occlusion-mask outputs are commented out below.

    Requires a CUDA device (uses the module-level `model`).
    """
    # Convert to (1, 3, H, W) float tensors normalised to [0, 1].
    img1 = torch.from_numpy(img1_np).float().permute(2, 0, 1).unsqueeze(0)/255.0
    img2 = torch.from_numpy(img2_np).float().permute(2, 0, 1).unsqueeze(0)/255.0
    img1=img1.cuda()
    img2=img2.cuda()
    # centralize: mean-shift both frames (see MDFlow_main.utils).
    img1, img2, _ = centralize(img1, img2)

    height, width = img1.shape[-2:]
    orig_size = (int(height), int(width))

    # Pad the spatial size up to a multiple of div_size if necessary.
    if height % div_size != 0 or width % div_size != 0:
        input_size = (
            int(div_size * np.ceil(height / div_size)),  # round up to next multiple of div_size
            int(div_size * np.ceil(width / div_size))
        )
        img1 = F.interpolate(img1, size=input_size, mode='bilinear', align_corners=False)
        img2 = F.interpolate(img2, size=input_size, mode='bilinear', align_corners=False)
    else:
        input_size = orig_size

    # Batch forward (img1->img2) and backward (img2->img1) passes together.
    input_fw = torch.cat([img1, img2], 1).cuda()
    input_bw = torch.cat([img2, img1], 1).cuda()
    input_t = torch.cat([input_fw, input_bw], 0)

    output = model(input_t).data

    # Upsample the coarse network output to input resolution and undo the
    # training-time flow scaling.
    flow = div_flow * F.interpolate(output, size=input_size, mode='bilinear', align_corners=False)

    # If the frames were resized for inference, bring the flow back to the
    # original resolution and rescale the displacement components to match.
    if input_size != orig_size:
        scale_h = orig_size[0] / input_size[0]
        scale_w = orig_size[1] / input_size[1]
        flow = F.interpolate(flow, size=orig_size, mode='bilinear', align_corners=False)
        flow[:, 0, :, :] *= scale_w
        flow[:, 1, :, :] *= scale_h

    # Batch item 0 is the forward flow, item 1 the backward flow.
    flow_fw = flow[:1, :, :, :]
    flow_bw = flow[1:, :, :, :]

    # To HxWx2 numpy arrays.
    flow_fw = flow_fw[0].cpu().permute(1, 2, 0).numpy()
    flow_bw = flow_bw[0].cpu().permute(1, 2, 0).numpy()

    # Occlusion-mask computation disabled (placeholders returned instead):
    # occ_fw, occ_bw = get_occ_mask(flow_fw, flow_bw)
    # occ_fw = (255.0 * occ_fw[0].cpu().permute(1, 2, 0).repeat(1, 1, 3)).numpy().astype(np.uint8)
    # occ_bw = (255.0 * occ_bw[0].cpu().permute(1, 2, 0).repeat(1, 1, 3)).numpy().astype(np.uint8)

    flow_fw_v ,u_f,v_f= flow_to_color(flow_fw)
    flow_bw_v ,u_b,v_b= flow_to_color(flow_bw)
    # img= np.concatenate((img1_np, img2_np), 1)
    #
    # img_r1 = np.concatenate((img1_np, img2_np), 1)
    # img_r2 = np.concatenate((flow_fw_v, flow_bw_v), 1)

    # img_r3 = np.concatenate((occ_fw, occ_bw), 1)
    # img_out = np.concatenate((img_r1, img_r2, img_r3), 0)

    # cv2.namedWindow("3", cv2.WINDOW_NORMAL)
    # cv2.imshow("3", flow_fw_v)
    # cv2.waitKey(0)
    # u_f = np.array(u_f,np.int32)
    # v_f = np.array(v_f,np.int32)
    # u_b= np.array(u_b, np.int32)
    # v_b = np.array(v_b, np.int32)

    # return u_f,v_f,img_r2
    # return u_f,v_f,occ_fw,occ_bw
    return u_f, v_f, 1,1


def mask_uv(data1, data2, data2_new, occ_fw, occ_bw):
    """Drop point correspondences that fall on occluded pixels.

    A pixel counts as occluded where the mask image is pure white
    ([255, 255, 255]).  A row is removed when EITHER its data1 point is
    occluded in occ_fw OR its data2 point is occluded in occ_bw; data2
    points lying outside occ_bw fall back to the data1 verdict.

    NOTE(review): points are looked up as mask[data[:, 0], data[:, 1]],
    i.e. (row, col) order, while line_track treats column 0 as the image
    x-coordinate — confirm the callers agree on the convention.

    Parameters
    ----------
    data1, data2 : (N, 2) point arrays (cast to int here); data1 must be
        inside occ_fw.
    data2_new : (N, >=2) array aligned row-for-row with data1/data2.
    occ_fw, occ_bw : HxWx3 occlusion-mask images.

    Returns
    -------
    data1_nomask, data2_nomask : (M, 3) int arrays — surviving points with
        their (always 0) occlusion flag appended as a third column.
    data2_new : (M, 2) array — surviving rows of data2_new, first 2 columns.
    """
    mask1 = np.all(occ_fw == [255, 255, 255], axis=2)  # True where occluded
    mask2 = np.all(occ_bw == [255, 255, 255], axis=2)

    data1 = data1.astype(int)
    result1 = mask1[data1[:, 0], data1[:, 1]].astype(int).reshape(-1, 1)

    data2 = data2.astype(int)
    # Out-of-bounds data2 points cannot be looked up in mask2.  The original
    # code indexed first (IndexError for indices >= shape, silent wraparound
    # for negatives) and only then checked bounds; clip for the lookup, then
    # substitute the data1 verdict for those rows, as intended.
    rows = np.clip(data2[:, 0], 0, mask2.shape[0] - 1)
    cols = np.clip(data2[:, 1], 0, mask2.shape[1] - 1)
    result2 = mask2[rows, cols].astype(int).reshape(-1, 1)
    out_of_bounds = ((data2[:, 0] < 0) | (data2[:, 0] >= mask2.shape[0]) |
                     (data2[:, 1] < 0) | (data2[:, 1] >= mask2.shape[1]))
    result2[out_of_bounds] = result1[out_of_bounds]

    result = np.logical_or(result1, result2).astype(int)
    keep = result[:, 0] == 0  # rows whose combined occlusion flag is 0

    # The original filtered data2_new, then concatenated the filtered array
    # with the full-length `result` again — a shape-mismatch crash whenever
    # any row was actually removed.  Filter each array exactly once.
    data1_nomask = np.concatenate((data1, result), axis=1)[keep]
    data2_nomask = np.concatenate((data2, result), axis=1)[keep]
    data2_new = np.asarray(data2_new)[keep][:, :2]
    return data1_nomask, data2_nomask, data2_new

def line_track_(u_f, v_f, data, img2, img1):  # per-frame visualisation variant
    """Advect each point in `data` by the flow field and draw the in-frame
    results on img2.

    Parameters
    ----------
    u_f, v_f : per-pixel flow components, indexed as u_f[row][col].
    data : sequence of (x, y) point coordinates.
    img2 : image to annotate (modified in place).
    img1 : passed through unchanged.

    Returns
    -------
    (advected, img2, img1, inside) where `advected` is an array of every
    advected point and `inside` the list of points that landed inside img2.
    """
    advected = []
    inside = []
    for point in data:
        col = int(point[0])
        row = int(point[1])
        nx = col + u_f[row][col]
        ny = row + v_f[row][col]
        advected.append((nx, ny))
        if 0 <= nx <= img2.shape[1] and 0 <= ny <= img2.shape[0]:
            inside.append((nx, ny))
            cv2.circle(img2, (int(nx), int(ny)), 5, (0, 255, 0), -1)  # green dot
    return np.array(advected), img2, img1, inside


def line_track(u_f, v_f, data, img2):
    """Vectorised point advection: shift every point in `data` by the flow
    field (u_f, v_f) and mark the in-frame results on img2.

    Returns (all advected points, annotated img2, in-frame subset).
    """
    pts = np.asarray(data, dtype=int)
    cols = pts[:, 0]
    rows = pts[:, 1]
    # Flow fields are indexed [row, col]; advect all points at once.
    new_x = cols + u_f[rows, cols]
    new_y = rows + v_f[rows, cols]
    moved = np.column_stack((new_x, new_y))
    # Keep only the points that land inside img2.
    in_frame = (
        (new_x >= 0) & (new_x <= img2.shape[1]) &
        (new_y >= 0) & (new_y <= img2.shape[0])
    )
    kept = moved[in_frame]
    # Draw a green dot at every surviving location.
    for px, py in kept.astype(int):
        cv2.circle(img2, (int(px), int(py)), 1, (0, 255, 0), -1)
    return moved, img2, kept



def pre_dis(k1,k2,k4,t1_2,t1_4,R1_4,R1_2,p1,p4):#p1,p4齐次坐标
    a_=np.linalg.inv(k1)
    a = np.dot(a_, p1)  # xw/zw,yw/zw,1
    b = np.dot(R1_4, a)  # 3*1
    c = np.dot(R1_4[2, :], a)  # 1*1
    d = np.dot(k4, b)
    e = np.dot(k4, t1_4) - t1_4[2] * p4
    f = c*p4 - d
    zw_=np.array([e[0]/f[0],e[1]/f[1]])
    #zw选择不同的值，效果不同，zw=zw_[0]时效果最好
    # zw=(zw_[0]+zw_[1])/2
    zw=zw_[0]
   # zw = zw_[1]

    pw=zw*a
    zc_2=zw*np.dot(R1_2[2, :], a) +t1_2[2]
    pre2=np.dot(k2,(np.dot(R1_2,pw)+t1_2))/zc_2#
    # dis=np.linalg.norm(pre2- p2)
    return pre2
