# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import os
import cv2
import torch
import argparse
import numpy as np
import time
import math
from PIL import Image
from cotracker.utils.visualizer import Visualizer, read_video_from_path, read_image_from_path
from cotracker.predictor import CoTrackerPredictor

# Pick the best available accelerator: CUDA first, then Apple Silicon (MPS),
# falling back to plain CPU.
DEFAULT_DEVICE = ('cuda' if torch.cuda.is_available() else
                  'mps' if torch.backends.mps.is_available() else
                  'cpu')

def readCoorDinates(path):
    """Parse a whitespace-separated coordinate file.

    Each non-empty line is expected to hold five values:

        frame_id  x  y  x2  y2

    where (x, y) appears to be the query point and (x2, y2) the reference
    prediction for it (schema inferred from getFrames/checkCoors callers —
    TODO confirm against the file producer).

    Args:
        path: path to the text file to read.

    Returns:
        A list of ``[frame_id:int, x:float, y:float, x2:float, y2:float]``
        rows, in file order.
    """
    coors = []
    with open(path) as f:
        # Stream line by line instead of readlines(); skip blank lines so a
        # trailing newline does not crash the int() parse.
        for line in f:
            values = line.split()
            if not values:
                continue
            frame_id = int(values[0])
            x, y, x2, y2 = (float(v) for v in values[1:5])
            coors.append([frame_id, x, y, x2, y2])
    return coors

def getFrames(coors, i):
    """Collect all coordinate rows belonging to a single frame.

    Starting at index *i*, takes every consecutive row whose frame id equals
    ``coors[i][0]`` (rows are assumed sorted by frame id).

    Args:
        coors: rows of ``[frame, x, y, x2, y2]`` as produced by
            readCoorDinates.
        i: index of the first unconsumed row.

    Returns:
        ``(queries, next_index, predictions)`` where ``queries`` holds
        ``[frame, x, y]`` triples, ``predictions`` holds ``[frame, x2, y2]``
        triples, and ``next_index`` is the first index not consumed.
        Past the end of *coors* the sentinel ``[[], -1, []]`` is returned.
    """
    total = len(coors)
    if i >= total:
        return [[], -1, []]

    # Only rows strictly below startFrame + 1 are taken, i.e. one frame's
    # worth of coordinates per call.
    limit = coors[i][0] + 1

    queries = []
    predictions = []
    while i < total and coors[i][0] < limit:
        row = coors[i]
        queries.append([row[0], row[1], row[2]])
        predictions.append([row[0], row[3], row[4]])
        i += 1

    return queries, i, predictions

def saveCoors(
    video: torch.Tensor,
    tracks: torch.Tensor,
    visibility: torch.Tensor = None,
    startFrame: int = 0
):
    """Append the tracked coordinates of frame ``t = 1`` to coors_data.txt.

    One line per query point, formatted ``"frame x y"``; points that are not
    visible at t = 1 are written as ``"frame -1 -1"``.

    Args:
        video: (B, T, C, H, W) tensor; only its shape is used here, as a
            sanity check on the expected layout.
        tracks: (B, T, N, 2) predicted point tracks.
        visibility: (B, T, N) per-point visibility flags. Despite the None
            default, it is required — the original crashed with an opaque
            TypeError when omitted.
        startFrame: offset added to t so frame ids match the source video.
    """
    if visibility is None:
        raise ValueError("saveCoors requires a visibility tensor")

    B, T, C, H, W = video.shape
    _, _, N, D = tracks.shape

    # Round to integer pixels and move to host memory: (T, N, 2).
    coords = tracks[0].long().detach().cpu().numpy()

    t = 1  # the frame right after the query (start) frame
    with open('coors_data.txt', 'a') as f:
        for i in range(N):
            if visibility[0, t, i]:
                f.write(str(t + startFrame) + ' ' + str(coords[t, i, 0]) + ' ' + str(coords[t, i, 1]) + '\n')
            else:
                # Occluded / lost point: sentinel coordinates.
                f.write(str(t + startFrame) + ' ' + '-1' + ' ' + '-1' + '\n')

def checkCoors(slamCoors, tracks: torch.Tensor, visibility: torch.Tensor):
    """Count visible points that drifted >= 10 px from their reference.

    Args:
        slamCoors: rows of ``[frame, x, y]`` reference coordinates, one per
            query point, aligned by index with the tracks.
        tracks: (B, T, N, 2) predicted point tracks; compared at t = 1
            after rounding to integer pixels.
        visibility: (B, T, N) per-point visibility flags; occluded points
            are skipped.

    Returns:
        Number of visible points whose Euclidean distance to the reference
        is at least 10 pixels.
    """
    _, _, N, D = tracks.shape
    coords = tracks[0].long().detach().cpu().numpy()  # (T, N, 2)
    t = 1

    drifted = 0
    for i in range(N):
        if not visibility[0, t, i]:
            continue

        reference = (slamCoors[i][1], slamCoors[i][2])
        predicted = (coords[t, i, 0], coords[t, i, 1])

        if math.dist(reference, predicted) >= 10.0:
            drifted += 1

    return drifted

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--video_path",
        default="./assets/apple.mp4",
        help="path to a video",
    )
    parser.add_argument(
        "--mask_path",
        default="./assets/apple_mask.png",
        help="path to a segmentation mask",
    )
    parser.add_argument(
        "--checkpoint",
        default="./checkpoints/cotracker_stride_4_wind_8.pth",
        help="cotracker model",
    )
    parser.add_argument("--grid_size", type=int, default=0, help="Regular grid size")
    parser.add_argument(
        "--grid_query_frame",
        type=int,
        default=0,
        help="Compute dense and grid tracks starting from this frame ",
    )

    parser.add_argument(
        "--backward_tracking",
        action="store_true",
        help="Compute tracks in both directions, not only forward",
    )

    args = parser.parse_args()

    # NOTE(review): hard-coded developer path. The r-prefix combined with
    # doubled backslashes yields literal "\\" separators (Windows tolerates
    # them, but one form of escaping would suffice).
    coorPath = r"E:\\gerrit\\co-tracker\\calcOpticalFlowPyrLK.txt"
    coorsFull = readCoorDinates(coorPath)

    # Truncate the output file once; saveCoors() appends to it per batch.
    # (Opening with 'w' already truncates — no explicit truncate() needed.)
    open('coors_data.txt', 'w').close()

    videoFull = read_image_from_path(args.video_path)

    # The mask is currently unused (the segm_mask argument to the model call
    # is commented out below); loaded once for parity with the original.
    segm_mask = np.array(Image.open(os.path.join(args.mask_path)))
    segm_mask = torch.from_numpy(segm_mask)[None, None]

    # Build the model once — it was previously reloaded from the checkpoint
    # on every loop iteration.
    model = CoTrackerPredictor(checkpoint=args.checkpoint)
    model = model.to(DEFAULT_DEVICE)

    cursor = 0   # index of the next unconsumed row in coorsFull
    handled = 0  # number of frame batches processed (hard cap below)

    while handled < 2400:
        coors, nxt, preds = getFrames(coorsFull, cursor)
        if nxt == -1:
            break

        cnt = len(coors)
        startFrame = coors[0][0]
        endFrame = coors[cnt - 1][0]

        # Queries are expressed relative to the clip sliced out below.
        for coor in coors:
            coor[0] -= startFrame

        print('startFrame ', startFrame, ' endFrame ', endFrame)

        # -1 because the first two coordinate columns refer to the previous
        # frame; +11 keeps extra frames of context for the tracker.
        video = videoFull[startFrame - 1:endFrame + 11]
        video = torch.from_numpy(video).permute(0, 3, 1, 2)[None].float()
        video = video.to(DEFAULT_DEVICE)

        query = torch.tensor(coors).float()
        query = query.to(DEFAULT_DEVICE)

        # Timing uses its own variable; the original reused `start`, which
        # also served as the coordinate cursor, and relied on it being
        # restored at the bottom of the loop.
        t0 = time.time()
        pred_tracks, pred_visibility = model(
            video,
            backward_tracking=False,
            queries=query[None]
        )
        print("Elapsed: ", time.time() - t0)
        print("computed")
        saveCoors(video, pred_tracks, pred_visibility, startFrame - 1)

        # Compare the tracked points against the reference (x2, y2) columns.
        err = checkCoors(preds, pred_tracks, pred_visibility)
        print('error track cnt is ', err)

        cursor = nxt
        handled += 1