import os
import cv2
from matplotlib import pyplot as plt
import numpy as np
# from torchvision.io import VideoReader
from tqdm import tqdm

# Cache directory for torch.hub model downloads (machine-specific path).
os.environ['TORCH_HOME']='H:/shaoming/.cache/torch'
# Hugging Face mirror endpoint for faster dataset/model downloads.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"


import torch
# from torchvision.transforms.functional import to_tensor, to_pil_image
# from torchvision.models import segmentation
# from midas.model_loader import load_model

# Run inference on GPU when available; all model/tensor placement below uses this.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

"""GitHub - isl-org/MiDaS: Code for robust monocular depth estimation described in
"Ranftl et al., Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer, TPAMI 2022"
MiDaS (Monocular Depth Sensing) estimates scene depth from a single ordinary 2D RGB image
using deep learning. Unlike traditional depth sensing that relies on dedicated sensors
(such as LiDAR or ToF cameras), MiDaS needs no extra hardware — a plain monocular camera
is enough to predict the scene's depth map.
How MiDaS works:
1. What is depth estimation?
In computer vision, depth estimation assigns each pixel an estimated distance from the
camera, often visualized as a grayscale image: nearer objects appear brighter (smaller
depth), farther objects appear darker (larger depth).
MiDaS produces normalized *relative* depth, not absolute metric distance. The network's
output is typically normalized (e.g. to 0-255) for visualization or downstream tasks.
"""

def midas_model():
    """Load the MiDaS depth-estimation network and its matching input transform.

    Returns:
        tuple: ``(model, transform)`` — the MiDaS network moved to ``device``
        and set to eval mode, plus the preprocessing transform that matches
        the chosen model variant.
    """
    # DPT_Large: MiDaS v3 - Large (highest accuracy, slowest inference speed)
    model_type = "DPT_Large"

    model = torch.hub.load("intel-isl/MiDaS", model_type)
    model.to(device)
    model.eval()

    hub_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")

    # DPT variants require the DPT-specific transform; all other MiDaS
    # models use the "small" transform.
    if model_type in ("DPT_Large", "DPT_Hybrid"):
        chosen_transform = hub_transforms.dpt_transform
    else:
        chosen_transform = hub_transforms.small_transform

    return model, chosen_transform

def estimate_depth(midas, transform, frame):
    """Predict a relative depth map for one RGB frame.

    Args:
        midas: MiDaS model (callable on a batched input tensor).
        transform: preprocessing transform returned by ``midas_model``.
        frame: RGB image as an H x W x C numpy array.

    Returns:
        numpy.ndarray: H x W relative depth map, upsampled to the frame size.
    """
    batch = transform(frame).to(device)

    with torch.no_grad():
        raw_pred = midas(batch)
        # The network predicts at reduced resolution; resize the prediction
        # back to the frame's original H x W with bicubic interpolation.
        resized = torch.nn.functional.interpolate(
            raw_pred.unsqueeze(1),
            size=frame.shape[:2],
            mode="bicubic",
            align_corners=False,
        ).squeeze()

    return resized.cpu().numpy()

def draw_depth(depth):
    """Display a depth map with matplotlib's default colormap; blocks until the window is closed."""
    plt.imshow(depth)
    plt.show()



def generate_one_depth(filename = "elbow/video/00001.jpg"):
    """Estimate and display the depth map for a single image file.

    Args:
        filename: path to the input image (read with OpenCV, BGR order).

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    midas, transform = midas_model()

    img = cv2.imread(filename)
    if img is None:
        # cv2.imread returns None instead of raising on a missing/unreadable
        # file; fail with a clear error rather than crashing inside cvtColor.
        raise FileNotFoundError(f"Could not read image: {filename}")
    # MiDaS transforms expect RGB; OpenCV loads BGR.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    depth = estimate_depth(midas, transform, img)

    draw_depth(depth)

def sequence_depth_generator(directory="elbow/video"):
    """Run depth estimation over every frame in *directory* and save the stack.

    Frames are processed in sorted filename order — os.listdir returns names
    in arbitrary order, which would otherwise scramble the temporal sequence
    before stacking. The (N, H, W) result is saved to
    ``<dataset>/depthes.npy`` next to the frame directory.

    Args:
        directory: folder containing the frame images (default "elbow/video").

    Raises:
        FileNotFoundError: if any frame cannot be read.
    """
    # Sort so the depth stack preserves the video's frame order.
    filenames = sorted(os.listdir(directory))

    midas, transform = midas_model()

    depthes = []
    for filename in tqdm(filenames, total=len(filenames)):
        path = os.path.join(directory, filename)
        img = cv2.imread(path)
        if img is None:
            # Fail early with the offending path instead of crashing in cvtColor.
            raise FileNotFoundError(f"Could not read frame: {path}")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MiDaS expects RGB
        depthes.append(estimate_depth(midas, transform, img))

    depthes = np.stack(depthes, axis=0)

    np.save(f"{directory.split('/')[0]}/depthes.npy", depthes)

def extract_one_depth_traj():
    """Extract per-frame depth along a tracked point trajectory and drift-correct it.

    Loads the precomputed depth stack and a 2D point trajectory, samples the
    depth at the tracked point in every frame, then compensates for global
    frame-to-frame depth drift using a manually chosen static reference point.
    Writes the adjusted per-frame depths to ``elbow/trj_depth.txt``.
    """
    depthes = np.load("elbow/depthes.npy")

    print(depthes.shape)

    traj = np.loadtxt("elbow/track_724_com_point_traj.txt")

    # A point in the scene whose relative depth is stable across frames,
    # chosen by inspecting the video; used as the drift reference.
    rp = (550, 1000)

    ref_depth = []
    trj_depth = []

    for i, dep in enumerate(depthes):
        ref_depth.append(round(dep[rp[0], rp[1]], 2))
        # NOTE(review): assumes traj columns are (row, col) pixel indices — confirm.
        trj_depth.append(round(dep[int(traj[i, 0]), int(traj[i, 1])], 2))

    print(f"ref_depth: max: {np.max(ref_depth)}, min: {np.min(ref_depth)}, mean: {np.mean(ref_depth)}, std: {np.std(ref_depth)}")
    print(f"ref_depth: {ref_depth}")

    print(f"trj_depth: max: {np.max(trj_depth)}, min: {np.min(trj_depth)}, mean: {np.mean(trj_depth)}, std: {np.std(trj_depth)}")
    print(f"trj_depth: {trj_depth}")

    # Adjust trajectory depth by the reference point's frame-to-frame change.
    # BUG FIX: with a Python list, `trj_depth[1:] += diff_dep` extended the
    # slice copy with diff_dep's elements (list.__iadd__ == extend) and wrote
    # it back, growing the list instead of adding element-wise. Converting to
    # a numpy array first gives the intended element-wise adjustment.
    diff_dep = np.diff(ref_depth)
    trj_depth = np.asarray(trj_depth)
    trj_depth[1:] += diff_dep
    print("adjusted trj_depth:")
    print(f"trj_depth: max: {np.max(trj_depth)}, min: {np.min(trj_depth)}, mean: {np.mean(trj_depth)}, std: {np.std(trj_depth)}")
    print(f"trj_depth: {trj_depth}")

    np.savetxt("elbow/trj_depth.txt", trj_depth, fmt='%f', )

if __name__ == '__main__':
    # Step 1 (run once): build the depth stack for the frame sequence.
    # sequence_depth_generator()

    # Single-image demo:
    # generate_one_depth()
    extract_one_depth_traj()

