# @Time: 2023/6/19 19:20
# @Author: liyuhan
# @File: warper.py

import datetime
import time
import traceback
from pathlib import Path
from typing import Tuple, Optional

import numpy
import skimage.io
import torch
import torch.nn.functional as F
import math

# import Imath
# import OpenEXR
import numpy as np
import json
import cv2


class Warper:
    """Warps frames between two camera views using per-pixel depth and camera
    intrinsics/extrinsics, via bilinear splatting (forward) and bilinear
    interpolation (backward)."""

    def __init__(self, resolution: tuple = None, device: str = 'gpu0'):
        """
        :param resolution: optional (h, w); when given, input shapes are asserted against it.
        :param device: 'cpu' or 'gpuN'; falls back to cpu when CUDA is unavailable.
        """
        self.resolution = resolution
        self.device = self.get_device(device)
        return

    def forward_warp(self, frame1: torch.Tensor, mask1: Optional[torch.Tensor], depth1: torch.Tensor,
                     transformation1: torch.Tensor, transformation2: torch.Tensor, intrinsic1: torch.Tensor,
                     intrinsic2: Optional[torch.Tensor]) -> \
            Tuple[torch.Tensor, torch.Tensor]:
        """
        Given a frame1 and global transformations transformation1 and transformation2, warps frame1 to the
        second view using bilinear splatting.
        All arrays should be torch tensors with batch dimension and channel first.
        :param frame1: (b, 3, h, w). If frame1 is not in the range [0, 255], either set is_image=False when
                        calling bilinear_splatting on the frame within this function, or modify the clipping
                        in the bilinear_splatting() method accordingly.
        :param mask1: (b, 1, h, w) - 1 for known, 0 for unknown. Optional
        :param depth1: (b, 1, h, w)
        :param transformation1: (b, 4, 4) extrinsic transformation matrix of first view: [R, t; 0, 1]
        :param transformation2: (b, 4, 4) extrinsic transformation matrix of second view: [R, t; 0, 1]
        :param intrinsic1: (b, 3, 3) camera intrinsic matrix
        :param intrinsic2: (b, 3, 3) camera intrinsic matrix. Optional; defaults to intrinsic1
        :return: (warped_frame2, warped_depth2) - frame and depth splatted into view 2,
                 shapes (b, 3, h, w) and (b, 1, h, w)
        """
        transformation1 = transformation1.double()
        transformation2 = transformation2.double()
        if self.resolution is not None:
            assert frame1.shape[2:4] == self.resolution
        b, c, h, w = frame1.shape
        if mask1 is None:
            mask1 = torch.ones(size=(b, 1, h, w)).to(frame1)
        if intrinsic2 is None:
            intrinsic2 = intrinsic1.clone()

        assert frame1.shape == (b, 3, h, w)
        assert mask1.shape == (b, 1, h, w)
        assert depth1.shape == (b, 1, h, w)
        assert transformation1.shape == (b, 4, 4)
        assert transformation2.shape == (b, 4, 4)
        assert intrinsic1.shape == (b, 3, 3)
        assert intrinsic2.shape == (b, 3, 3)

        frame1 = frame1.double().to(self.device)
        mask1 = mask1.to(self.device)
        depth1 = depth1.to(self.device)
        transformation1 = transformation1.to(self.device)
        transformation2 = transformation2.to(self.device)
        intrinsic1 = intrinsic1.to(self.device)
        intrinsic2 = intrinsic2.to(self.device)

        # 3D points of camera-1 pixels expressed in camera-2 coordinates: (b, h, w, 3, 1)
        trans_points1 = self.compute_transformed_points(depth1, transformation1, transformation2, intrinsic1,
                                                        intrinsic2)
        # Perspective division: pixel coordinates in view 2 (epsilon guards z == 0)
        trans_coordinates = trans_points1[:, :, :, :2, 0] / (trans_points1[:, :, :, 2:3, 0] + 1e-7)
        # Depth of each point as seen from view 2
        trans_depth1 = trans_points1[:, :, :, 2, 0]

        grid = self.create_grid(b, h, w).to(trans_coordinates)
        flow12 = trans_coordinates.permute(0, 3, 1, 2) - grid

        # Sanitize NaN depths once here: bilinear_splatting no longer mutates its
        # depth input in-place, and both calls below must see the same values.
        trans_depth1 = torch.where(torch.isnan(trans_depth1),
                                   torch.full_like(trans_depth1, 65536.0), trans_depth1)
        # Bug fix: the depth used to be passed as trans_depth1[None, :, :] -> (1, b, h, w),
        # which is only correct for b == 1. Add an explicit channel dim instead: (b, 1, h, w).
        depth_cf = trans_depth1[:, None]

        # Splat the RGB into the virtual view via bilinear weights
        warped_frame2, mask2 = self.bilinear_splatting(frame1, mask1, depth_cf, flow12, None, is_image=True)
        # Splat the depth into the virtual view the same way
        warped_depth2 = self.bilinear_splatting(depth_cf, mask1, depth_cf, flow12, None,
                                                is_image=False)[0]
        return warped_frame2, warped_depth2

    def compute_transformed_points(self, depth1: torch.Tensor, transformation1: torch.Tensor,
                                   transformation2: torch.Tensor,
                                   intrinsic1: torch.Tensor, intrinsic2: Optional[torch.Tensor]):
        """
        Computes the transformed (view-2) position for each pixel location of view 1.
        :param depth1: (b, 1, h, w)
        :param transformation1: (b, 4, 4) extrinsic of view 1
        :param transformation2: (b, 4, 4) extrinsic of view 2
        :param intrinsic1: (b, 3, 3)
        :param intrinsic2: (b, 3, 3). Optional; defaults to intrinsic1
        :return: (b, h, w, 3, 1) homogeneous pixel coordinates in view 2, still scaled by view-2 depth
                 (the caller performs the perspective division)
        """
        if self.resolution is not None:
            assert depth1.shape[2:4] == self.resolution
        b, _, h, w = depth1.shape
        if intrinsic2 is None:
            intrinsic2 = intrinsic1.clone()
        # Relative motion: camera-1 frame -> camera-2 frame
        transformation = torch.bmm(transformation2, torch.linalg.inv(transformation1))  # (b, 4, 4)

        # Build homogeneous pixel coordinates [x, y, 1] for every pixel
        x1d = torch.arange(0, w)[None]
        y1d = torch.arange(0, h)[:, None]
        x2d = x1d.repeat([h, 1]).to(depth1)  # (h, w)
        y2d = y1d.repeat([1, w]).to(depth1)  # (h, w)
        ones_2d = torch.ones(size=(h, w)).to(depth1)  # (h, w)
        ones_4d = ones_2d[None, :, :, None, None].repeat([b, 1, 1, 1, 1])  # (b, h, w, 1, 1)
        pos_vectors_homo = torch.stack([x2d, y2d, ones_2d], dim=2)[None, :, :, :, None]  # (1, h, w, 3, 1)
        # Shift pixel coordinates by a hard-coded half-resolution of 256.
        # NOTE(review): assumes 512x512 inputs and interacts with any principal
        # point already present in the intrinsics - confirm against the data.
        pos_vectors_homo[:, :, :, 0:2, 0] = pos_vectors_homo[:, :, :, 0:2, 0] - 256

        # Reshape the camera parameters for broadcasting below
        intrinsic1_inv = torch.linalg.inv(intrinsic1)  # (b, 3, 3)
        intrinsic1_inv_4d = intrinsic1_inv[:, None, None]  # (b, 1, 1, 3, 3)
        intrinsic2_4d = intrinsic2[:, None, None]  # (b, 1, 1, 3, 3)
        depth_4d = depth1[:, 0][:, :, :, None, None]  # (b, h, w, 1, 1)
        trans_4d = transformation[:, None, None]  # (b, 1, 1, 4, 4)

        # Back-project pixels through the inverse intrinsics (rays with z = 1)
        unnormalized_pos = torch.matmul(intrinsic1_inv_4d, pos_vectors_homo)  # (b, h, w, 3, 1)
        # Scale the rays by depth to obtain 3D points in camera-1 coordinates
        world_points = depth_4d * unnormalized_pos  # (b, h, w, 3, 1)
        # [x, y, z] -> [x, y, z, 1] homogeneous
        world_points_homo = torch.cat([world_points, ones_4d], dim=3)  # (b, h, w, 4, 1)
        # Apply the camera-1 -> camera-2 rigid transform
        trans_world_homo = torch.matmul(trans_4d, world_points_homo)  # (b, h, w, 4, 1)
        # Drop the homogeneous coordinate
        trans_world = trans_world_homo[:, :, :, :3, :]  # (b, h, w, 3, 1)
        # Project with camera-2 intrinsics
        trans_norm_points = torch.matmul(intrinsic2_4d, trans_world)  # (b, h, w, 3, 1)
        return trans_norm_points

    def camera_intrinsic_transform(self, vfov=20, hfov=20, pixel_width=512, pixel_height=512):
        """
        Estimates a pinhole intrinsic matrix for a (Blender) virtual camera from its
        field of view and image size.
        :param vfov: vertical field of view in degrees
        :param hfov: horizontal field of view in degrees
        :param pixel_width: image width in pixels
        :param pixel_height: image height in pixels
        :return: (3, 3) numpy intrinsic matrix with the principal point at the image center
        """
        camera_intrinsics = np.zeros((3, 3))
        camera_intrinsics[2, 2] = 1
        camera_intrinsics[0, 0] = (pixel_width / 2.0) / math.tan(math.radians(hfov / 2.0))
        camera_intrinsics[0, 2] = pixel_width / 2.0
        camera_intrinsics[1, 1] = (pixel_height / 2.0) / math.tan(math.radians(vfov / 2.0))
        camera_intrinsics[1, 2] = pixel_height / 2.0
        return camera_intrinsics

    def bilinear_splatting(self, frame1: torch.Tensor, mask1: Optional[torch.Tensor], depth1: torch.Tensor,
                           flow12: torch.Tensor, flow12_mask: Optional[torch.Tensor], is_image: bool = False) -> \
            Tuple[torch.Tensor, torch.Tensor]:
        """
        Bilinear splatting: scatters frame1 pixels to the positions given by flow12.
        Contributions are weighted by bilinear proximity and divided by an exponential
        depth weight, so nearer points dominate where several pixels land on the same target.
        :param frame1: (b, c, h, w)
        :param mask1: (b, 1, h, w): 1 for known, 0 for unknown. Optional
        :param depth1: (b, 1, h, w)
        :param flow12: (b, 2, h, w)
        :param flow12_mask: (b, 1, h, w): 1 for valid flow, 0 for invalid flow. Optional
        :param is_image: if true, output will be clipped to the [0, 255] range
        :return: warped_frame2: (b, c, h, w)
                 mask2: (b, 1, h, w): 1 for known and 0 for unknown
        """
        if self.resolution is not None:
            assert frame1.shape[2:4] == self.resolution
        b, c, h, w = frame1.shape
        if mask1 is None:
            mask1 = torch.ones(size=(b, 1, h, w)).to(frame1)
        if flow12_mask is None:
            flow12_mask = torch.ones(size=(b, 1, h, w)).to(flow12)
        grid = self.create_grid(b, h, w).to(frame1)
        trans_pos = flow12 + grid

        # Rescale all target positions so they span a fixed canvas.
        # NOTE(review): hard-coded to 512 and normalised by the global min/max,
        # which distorts the flow field globally; assumes 512x512 outputs and
        # divides by zero when all positions coincide along an axis - confirm intent.
        mn_w = torch.min(trans_pos[:, 0])
        mx_w = torch.max(trans_pos[:, 0])
        mn_h = torch.min(trans_pos[:, 1])
        mx_h = torch.max(trans_pos[:, 1])
        trans_pos[:, 0] = ((trans_pos[:, 0] - mn_w) / (mx_w - mn_w)) * 512
        trans_pos[:, 1] = ((trans_pos[:, 1] - mn_h) / (mx_h - mn_h)) * 512
        # Push NaN positions far outside the canvas; clamping below sends them to
        # the padded border, which is cropped away at the end.
        trans_pos[torch.isnan(trans_pos)] = 65536

        # Integer neighbours of each (shifted) target position, clamped into the
        # padded (h+2, w+2) canvas
        trans_pos_offset = trans_pos + 1
        trans_pos_floor = torch.floor(trans_pos_offset).long()
        trans_pos_ceil = torch.ceil(trans_pos_offset).long()
        trans_pos_offset = torch.stack([
            torch.clamp(trans_pos_offset[:, 0], min=0, max=w + 1),
            torch.clamp(trans_pos_offset[:, 1], min=0, max=h + 1)], dim=1)
        trans_pos_floor = torch.stack([
            torch.clamp(trans_pos_floor[:, 0], min=0, max=w + 1),
            torch.clamp(trans_pos_floor[:, 1], min=0, max=h + 1)], dim=1)
        trans_pos_ceil = torch.stack([
            torch.clamp(trans_pos_ceil[:, 0], min=0, max=w + 1),
            torch.clamp(trans_pos_ceil[:, 1], min=0, max=h + 1)], dim=1)

        # Bilinear proximity weights to the four surrounding integer positions
        prox_weight_nw = (1 - (trans_pos_offset[:, 1:2] - trans_pos_floor[:, 1:2])) * \
                         (1 - (trans_pos_offset[:, 0:1] - trans_pos_floor[:, 0:1]))
        prox_weight_sw = (1 - (trans_pos_ceil[:, 1:2] - trans_pos_offset[:, 1:2])) * \
                         (1 - (trans_pos_offset[:, 0:1] - trans_pos_floor[:, 0:1]))
        prox_weight_ne = (1 - (trans_pos_offset[:, 1:2] - trans_pos_floor[:, 1:2])) * \
                         (1 - (trans_pos_ceil[:, 0:1] - trans_pos_offset[:, 0:1]))
        prox_weight_se = (1 - (trans_pos_ceil[:, 1:2] - trans_pos_offset[:, 1:2])) * \
                         (1 - (trans_pos_ceil[:, 0:1] - trans_pos_offset[:, 0:1]))

        # Remove NaNs from the depth WITHOUT mutating the caller's tensor
        # (the original in-place fill leaked the sanitisation back to the caller)
        depth1 = torch.where(torch.isnan(depth1), torch.full_like(depth1, 65536.0), depth1)
        sat_depth1 = torch.clamp(depth1, min=0, max=1000)
        log_depth1 = torch.log(1 + sat_depth1)
        depth_weights = torch.exp(log_depth1 / log_depth1.max() * 50)

        # Final splatting weights, moved to channel-last for index_put_
        weight_nw = torch.moveaxis(prox_weight_nw * mask1 * flow12_mask / depth_weights, [0, 1, 2, 3], [0, 3, 1, 2])
        weight_sw = torch.moveaxis(prox_weight_sw * mask1 * flow12_mask / depth_weights, [0, 1, 2, 3], [0, 3, 1, 2])
        weight_ne = torch.moveaxis(prox_weight_ne * mask1 * flow12_mask / depth_weights, [0, 1, 2, 3], [0, 3, 1, 2])
        weight_se = torch.moveaxis(prox_weight_se * mask1 * flow12_mask / depth_weights, [0, 1, 2, 3], [0, 3, 1, 2])

        # Padded accumulation canvases (cropped by one pixel on each side at the end)
        warped_frame = torch.zeros(size=(b, h + 2, w + 2, c), dtype=torch.float32).to(frame1)
        warped_weights = torch.zeros(size=(b, h + 2, w + 2, 1), dtype=torch.float32).to(frame1)
        frame1_cl = torch.moveaxis(frame1, [0, 1, 2, 3], [0, 3, 1, 2])
        batch_indices = torch.arange(b)[:, None, None].to(frame1.device)

        # Scatter-add each source pixel into its four neighbours
        warped_frame.index_put_((batch_indices, trans_pos_floor[:, 1], trans_pos_floor[:, 0]),
                                frame1_cl * weight_nw, accumulate=True)
        warped_frame.index_put_((batch_indices, trans_pos_ceil[:, 1], trans_pos_floor[:, 0]),
                                frame1_cl * weight_sw, accumulate=True)
        warped_frame.index_put_((batch_indices, trans_pos_floor[:, 1], trans_pos_ceil[:, 0]),
                                frame1_cl * weight_ne, accumulate=True)
        warped_frame.index_put_((batch_indices, trans_pos_ceil[:, 1], trans_pos_ceil[:, 0]),
                                frame1_cl * weight_se, accumulate=True)

        warped_weights.index_put_((batch_indices, trans_pos_floor[:, 1], trans_pos_floor[:, 0]),
                                  weight_nw, accumulate=True)
        warped_weights.index_put_((batch_indices, trans_pos_ceil[:, 1], trans_pos_floor[:, 0]),
                                  weight_sw, accumulate=True)
        warped_weights.index_put_((batch_indices, trans_pos_floor[:, 1], trans_pos_ceil[:, 0]),
                                  weight_ne, accumulate=True)
        warped_weights.index_put_((batch_indices, trans_pos_ceil[:, 1], trans_pos_ceil[:, 0]),
                                  weight_se, accumulate=True)

        warped_frame_cf = torch.moveaxis(warped_frame, [0, 1, 2, 3], [0, 2, 3, 1])
        warped_weights_cf = torch.moveaxis(warped_weights, [0, 1, 2, 3], [0, 2, 3, 1])
        cropped_warped_frame = warped_frame_cf[:, :, 1:-1, 1:-1]
        cropped_weights = warped_weights_cf[:, :, 1:-1, 1:-1]

        # Normalise by accumulated weights; untouched pixels stay 0 and are marked unknown
        mask = cropped_weights > 0
        zero_value = 0
        zero_tensor = torch.tensor(zero_value, dtype=frame1.dtype, device=frame1.device)
        warped_frame2 = torch.where(mask, cropped_warped_frame / cropped_weights, zero_tensor)
        mask2 = mask.to(frame1)

        if is_image:
            warped_frame2 = torch.clamp(warped_frame2, min=0, max=255)
        return warped_frame2, mask2

    def bilinear_interpolation(self, frame2: torch.Tensor, mask2: Optional[torch.Tensor], flow12: torch.Tensor,
                               flow12_mask: Optional[torch.Tensor], is_image: bool = False) -> \
            Tuple[torch.Tensor, torch.Tensor]:
        """
        Bilinear interpolation (backward warping): gathers frame2 values at the
        positions given by flow12.
        :param frame2: (b, c, h, w)
        :param mask2: (b, 1, h, w): 1 for known, 0 for unknown. Optional
        :param flow12: (b, 2, h, w)
        :param flow12_mask: (b, 1, h, w): 1 for valid flow, 0 for invalid flow. Optional
        :param is_image: if true, output will be clipped to the (-1, 1) range
        :return: warped_frame1: (b, c, h, w)
                 mask1: (b, 1, h, w): 1 for known and 0 for unknown
        """
        if self.resolution is not None:
            assert frame2.shape[2:4] == self.resolution
        b, c, h, w = frame2.shape
        if mask2 is None:
            mask2 = torch.ones(size=(b, 1, h, w)).to(frame2)
        if flow12_mask is None:
            flow12_mask = torch.ones(size=(b, 1, h, w)).to(flow12)
        grid = self.create_grid(b, h, w).to(frame2)
        trans_pos = flow12 + grid

        # Integer neighbours of each sample position, clamped into the padded canvas
        trans_pos_offset = trans_pos + 1
        trans_pos_floor = torch.floor(trans_pos_offset).long()
        trans_pos_ceil = torch.ceil(trans_pos_offset).long()
        trans_pos_offset = torch.stack([
            torch.clamp(trans_pos_offset[:, 0], min=0, max=w + 1),
            torch.clamp(trans_pos_offset[:, 1], min=0, max=h + 1)], dim=1)
        trans_pos_floor = torch.stack([
            torch.clamp(trans_pos_floor[:, 0], min=0, max=w + 1),
            torch.clamp(trans_pos_floor[:, 1], min=0, max=h + 1)], dim=1)
        trans_pos_ceil = torch.stack([
            torch.clamp(trans_pos_ceil[:, 0], min=0, max=w + 1),
            torch.clamp(trans_pos_ceil[:, 1], min=0, max=h + 1)], dim=1)

        # Bilinear proximity weights to the four surrounding integer positions
        prox_weight_nw = (1 - (trans_pos_offset[:, 1:2] - trans_pos_floor[:, 1:2])) * \
                         (1 - (trans_pos_offset[:, 0:1] - trans_pos_floor[:, 0:1]))
        prox_weight_sw = (1 - (trans_pos_ceil[:, 1:2] - trans_pos_offset[:, 1:2])) * \
                         (1 - (trans_pos_offset[:, 0:1] - trans_pos_floor[:, 0:1]))
        prox_weight_ne = (1 - (trans_pos_offset[:, 1:2] - trans_pos_floor[:, 1:2])) * \
                         (1 - (trans_pos_ceil[:, 0:1] - trans_pos_offset[:, 0:1]))
        prox_weight_se = (1 - (trans_pos_ceil[:, 1:2] - trans_pos_offset[:, 1:2])) * \
                         (1 - (trans_pos_ceil[:, 0:1] - trans_pos_offset[:, 0:1]))

        weight_nw = torch.moveaxis(prox_weight_nw * flow12_mask, [0, 1, 2, 3], [0, 3, 1, 2])
        weight_sw = torch.moveaxis(prox_weight_sw * flow12_mask, [0, 1, 2, 3], [0, 3, 1, 2])
        weight_ne = torch.moveaxis(prox_weight_ne * flow12_mask, [0, 1, 2, 3], [0, 3, 1, 2])
        weight_se = torch.moveaxis(prox_weight_se * flow12_mask, [0, 1, 2, 3], [0, 3, 1, 2])

        # Pad by one pixel so clamped border indices are valid
        frame2_offset = F.pad(frame2, [1, 1, 1, 1])
        mask2_offset = F.pad(mask2, [1, 1, 1, 1])
        bi = torch.arange(b)[:, None, None]

        # Gather the four neighbouring values and their masks: (b, h, w, c)
        f2_nw = frame2_offset[bi, :, trans_pos_floor[:, 1], trans_pos_floor[:, 0]]
        f2_sw = frame2_offset[bi, :, trans_pos_ceil[:, 1], trans_pos_floor[:, 0]]
        f2_ne = frame2_offset[bi, :, trans_pos_floor[:, 1], trans_pos_ceil[:, 0]]
        f2_se = frame2_offset[bi, :, trans_pos_ceil[:, 1], trans_pos_ceil[:, 0]]

        m2_nw = mask2_offset[bi, :, trans_pos_floor[:, 1], trans_pos_floor[:, 0]]
        m2_sw = mask2_offset[bi, :, trans_pos_ceil[:, 1], trans_pos_floor[:, 0]]
        m2_ne = mask2_offset[bi, :, trans_pos_floor[:, 1], trans_pos_ceil[:, 0]]
        m2_se = mask2_offset[bi, :, trans_pos_ceil[:, 1], trans_pos_ceil[:, 0]]

        # Masked weighted average of the four neighbours
        nr = weight_nw * f2_nw * m2_nw + weight_sw * f2_sw * m2_sw + \
             weight_ne * f2_ne * m2_ne + weight_se * f2_se * m2_se
        dr = weight_nw * m2_nw + weight_sw * m2_sw + weight_ne * m2_ne + weight_se * m2_se

        zero_value = -1 if is_image else 0
        zero_tensor = torch.tensor(zero_value, dtype=nr.dtype, device=nr.device)
        warped_frame1 = torch.where(dr > 0, nr / dr, zero_tensor)
        mask1 = (dr > 0).to(frame2)

        # Convert to channel first
        warped_frame1 = torch.moveaxis(warped_frame1, [0, 1, 2, 3], [0, 2, 3, 1])
        mask1 = torch.moveaxis(mask1, [0, 1, 2, 3], [0, 2, 3, 1])

        if is_image:
            assert warped_frame1.min() >= -1.1  # Allow for rounding errors
            assert warped_frame1.max() <= 1.1
            warped_frame1 = torch.clamp(warped_frame1, min=-1, max=1)
        return warped_frame1, mask1

    def normal_estimate(self, depth_im):
        """
        Estimates a normal map from a depth map via finite differences.
        :param depth_im: (b, 1, h, w) depth
        :return: (b, h, w, 3) normals, offset and rescaled to the [0, 255] range
        """
        b, c, h, w = depth_im.shape
        # torch.gradient returns derivatives per dimension in order, so y (rows) first
        if b == 1:
            zy, zx = torch.gradient(depth_im[0, 0])
        else:
            _, zy, zx = torch.gradient(depth_im[:, 0])

        zy, zx = zy.unsqueeze(-1), zx.unsqueeze(-1)

        # Surface normal is (-dz/dx, -dz/dy, 1), normalised to unit length below
        if b == 1:
            normal = torch.cat((-zx, -zy, torch.ones_like(zy)), dim=-1)[None]
        else:
            normal = torch.cat((-zx, -zy, torch.ones_like(zy)), dim=-1)
        n = torch.sqrt(normal.pow(2).sum(-1))

        normal[:, :, :, 0] /= n
        normal[:, :, :, 1] /= n
        normal[:, :, :, 2] /= n

        # Offset and rescale values to be in 0-255
        normal += 1
        normal /= 2
        normal *= 255
        return normal

    @staticmethod
    def create_grid(b, h, w):
        """
        Builds a batched pixel-coordinate grid.
        :return: (b, 2, h, w) long tensor; channel 0 holds x (column), channel 1 holds y (row)
        """
        x_1d = torch.arange(0, w)[None]
        y_1d = torch.arange(0, h)[:, None]
        x_2d = x_1d.repeat([h, 1])
        y_2d = y_1d.repeat([1, w])
        grid = torch.stack([x_2d, y_2d], dim=0)
        batch_grid = grid[None].repeat([b, 1, 1, 1])
        return batch_grid

    @staticmethod
    def read_image(path: Path) -> numpy.ndarray:
        """Reads an image from disk and returns it as a numpy array."""
        image = skimage.io.imread(path.as_posix())
        return image

    @staticmethod
    def read_depth(path: Path) -> numpy.ndarray:
        """
        Reads a depth map from a '.png', '.npy' or '.npz' (key 'depth') file.
        :raises RuntimeError: for any other suffix. ('.exr' support is disabled;
                              it required the OpenEXR/Imath packages.)
        """
        if path.suffix == '.png':
            depth = skimage.io.imread(path.as_posix())
        elif path.suffix == '.npy':
            depth = numpy.load(path.as_posix())
        elif path.suffix == '.npz':
            with numpy.load(path.as_posix()) as depth_data:
                depth = depth_data['depth']
        else:
            raise RuntimeError(f'Unknown depth format: {path.suffix}')
        return depth

    @staticmethod
    def get_device(device: str):
        """
        Returns a torch device object.
        :param device: 'cpu', 'gpu0', 'gpu1', ...; any 'gpuN' falls back to cpu
                       when CUDA is unavailable
        :return: torch.device
        """
        if device == 'cpu':
            device = torch.device('cpu')
        elif device.startswith('gpu') and torch.cuda.is_available():
            gpu_num = int(device[3:])
            device = torch.device(f'cuda:{gpu_num}')
        else:
            device = torch.device('cpu')
        return device


def demo1():
    """Demo: forward-warps view 0000 into the camera of view 0009 and saves the results.

    NOTE(review): the depth files are '.exr', which Warper.read_depth does not
    support (its OpenEXR branch is disabled), so this demo raises RuntimeError
    as-is; the hard-coded Windows paths are also machine-specific.
    """
    # Input file paths
    frame1_path = Path(r'C:\Users\Asher\Desktop\0000\images\0000.png')
    frame2_path = Path(r'C:\Users\Asher\Desktop\0000\images\0009.png')
    depth1_path = Path(r'C:\Users\Asher\Desktop\0000\depths\0000.exr')
    depth2_path = Path(r'C:\Users\Asher\Desktop\0000\depths\0009.exr')
    posespath1 = r'C:\Users\Asher\Desktop\0000\poses\0000.json'
    posespath2 = r'C:\Users\Asher\Desktop\0000\poses\0009.json'

    # Camera parameters (intrinsics and extrinsics). Context managers close the
    # JSON files (the originals were left open), and pose1/pose2 avoid shadowing
    # the builtin `dict`.
    with open(posespath1) as f:
        pose1 = json.load(f)
    intrinsic = np.array([[pose1['f_x'], 0, pose1['c_x']],
                          [0, pose1['f_y'], pose1['c_y']],
                          [0, 0, 1]])
    transformation1 = np.array(pose1['extrinsic'], dtype=np.float64)

    with open(posespath2) as f:
        pose2 = json.load(f)
    # Loaded for completeness; forward_warp is called with intrinsic2=None below
    intrinsic2 = np.array([[pose2['f_x'], 0, pose2['c_x']],
                           [0, pose2['f_y'], pose2['c_y']],
                           [0, 0, 1]])
    transformation2 = np.array(pose2['extrinsic'], dtype=np.float64)

    # Load images and depths; float64 because the warper works in double precision
    warper = Warper()
    frame1 = warper.read_image(frame1_path).astype(np.float64)
    frame2 = warper.read_image(frame2_path).astype(np.float64)
    depth1 = warper.read_depth(depth1_path).astype(np.float64)
    depth2 = warper.read_depth(depth2_path).astype(np.float64)
    # Zero out invalid (infinite-depth) pixels
    mask = np.isinf(depth1)
    mask2 = np.isinf(depth2)
    frame1[mask] = 0
    frame2[mask2] = 0
    depth1[mask] = 0
    depth2[mask2] = 0
    # Save the normalised ground-truth depth and the input frames
    save_depth = (depth1 - depth1.min()) / (depth1.max() - depth1.min()) * 255
    skimage.io.imsave('depth1gt.png', save_depth.astype(np.uint8))
    skimage.io.imsave('frame1.png', frame1.astype(np.uint8))
    skimage.io.imsave('frame2.png', frame2.astype(np.uint8))

    # Convert everything to torch (b, c, h, w) tensors
    frame1 = torch.from_numpy(frame1).unsqueeze(0).permute(0, 3, 1, 2)
    frame2 = torch.from_numpy(frame2).unsqueeze(0).permute(0, 3, 1, 2)
    depth1 = torch.from_numpy(depth1).unsqueeze(0).unsqueeze(0)
    depth2 = torch.from_numpy(depth2).unsqueeze(0).unsqueeze(0)
    transformation1 = torch.from_numpy(transformation1).unsqueeze(0)
    transformation2 = torch.from_numpy(transformation2).unsqueeze(0)
    intrinsic = torch.from_numpy(intrinsic).unsqueeze(0)

    # Synthesize the novel view's RGB, depth and normal map
    warped_frame2, warp_depth = warper.forward_warp(frame1, None, depth1, transformation1, transformation2, intrinsic,
                                                    None)
    normal = warper.normal_estimate(warp_depth)  # computed but not saved below

    # Convert back to (h, w, c, b) uint8 numpy arrays and save
    warp_depth = torch.clamp(warp_depth, 0, 255)
    warp_depth = warp_depth.permute(2, 3, 1, 0).detach().cpu().numpy().astype(np.uint8)
    depth1 = torch.clamp(depth1, 0, 255)
    depth1 = depth1.permute(2, 3, 1, 0).detach().cpu().numpy().astype(np.uint8)
    depth2 = torch.clamp(depth2, 0, 255)
    depth2 = depth2.permute(2, 3, 1, 0).detach().cpu().numpy().astype(np.uint8)
    warped_frame2 = warped_frame2.permute(2, 3, 1, 0).detach().cpu().numpy().astype(np.uint8)
    save_image = warped_frame2[:, :, :, 0]
    save_image[mask2] = 0
    skimage.io.imsave('frame2_warped.png', save_image)
    skimage.io.imsave('depth1.png', depth1[:, :, 0, 0])
    skimage.io.imsave('depth2.png', depth2[:, :, 0, 0])

    return


def main():
    """Entry point: runs the forward-warping demo."""
    demo1()


if __name__ == '__main__':
    # Report wall-clock start/end times and total duration around main(); any
    # exception is printed with its traceback instead of aborting the timing.
    print('Program started at ' + datetime.datetime.now().strftime('%d/%m/%Y %I:%M:%S %p'))
    t_begin = time.time()
    try:
        main()
    except Exception as exc:
        print(exc)
        traceback.print_exc()
    t_end = time.time()
    print('Program ended at ' + datetime.datetime.now().strftime('%d/%m/%Y %I:%M:%S %p'))
    print('Execution time: ' + str(datetime.timedelta(seconds=t_end - t_begin)))
