"""
!/usr/bin/env python
-*- coding: utf-8 -*-
@CreateTime : 2024/7/2 10:11
@Author  :    AnimateX
@Contact :    animatex@163.com
@File    :    utils.py
@License :    Copyright © 2024 AnimateX. All rights reserved.
@Version :    utils_VER0.1

------------------------------------------------------------------
# @Description:   Utility helpers for image I/O, depth-map processing, depth colorization and edge denoising.

------------------------------------------------------------------
"""
import os
import csv
import cv2
import glob
import json
import hashlib
import linecache
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt


class Utils(object):
    """Static helpers for raw/png/jpg image I/O, recursive file discovery,
    CSV/JSON export, depth-map post-processing and point-cloud export."""

    def __init__(self):
        pass

    @staticmethod
    def loadRawImg(raw_img_path: str,
                   data_type=np.uint16,
                   size=(240, 320)):
        """Load a headerless ``.raw`` image file into a 2-D array.

        :param raw_img_path: path of the raw file
        :param data_type: numpy dtype of the stored pixel values
        :param size: (height, width) of the stored image
        :return: ndarray with shape ``size``, or None on any error
        """
        if not os.path.exists(raw_img_path):
            print('[ERR] Raw file not exists!')
            return None

        img = np.fromfile(raw_img_path, dtype=data_type)

        if img.size == 0:
            return None

        # Fail gracefully on an element-count mismatch instead of letting
        # np.reshape raise ValueError (keeps the None-on-error convention).
        if img.size != size[0] * size[1]:
            print(f'[ERR] Raw element count {img.size} does not match size {size}!')
            return None

        return np.reshape(img, size)

    @staticmethod
    def readImage(img_path: str,
                  img_w: int,
                  img_h: int,
                  data_type=np.uint16,
                  mode='ir'):
        """Read a raw/png/jpg/jpeg image.

        :param img_path: path of the image file
        :param img_w: image width (used for raw files only)
        :param img_h: image height (used for raw files only)
        :param data_type: pixel dtype for raw files
        :param mode: rgb(channel: 3, uchar), ir(channel: 1, uchar), speckle(channel: 1, ushort)
        :return: image ndarray, or None on unsupported suffix / load failure
        """
        # os.path.splitext is robust against dots in directory names,
        # unlike split('.')[-1].
        ext = os.path.splitext(img_path)[1].lstrip('.').lower()

        if ext == 'raw':
            # Static call -- no need to instantiate Utils.
            img = Utils.loadRawImg(img_path, data_type=data_type, size=(img_h, img_w))
        elif ext == 'png':
            flag = cv2.IMREAD_COLOR if mode == 'rgb' else cv2.IMREAD_UNCHANGED
            img = cv2.imread(img_path, flag)
        elif ext in ('jpg', 'jpeg'):
            flag = cv2.IMREAD_COLOR if mode == 'rgb' else cv2.IMREAD_GRAYSCALE
            img = cv2.imread(img_path, flag)
        else:
            print(f' [ERR] Not support this file suffix: {ext}')
            img = None

        return img

    @staticmethod
    def saveImage(save_img_path: str,
                  img,
                  mode='raw'):
        """Save an image either as headerless raw bytes or via cv2.

        :param save_img_path: destination path
        :param img: ndarray to write
        :param mode: 'raw' writes ndarray bytes with tofile; anything else
                     goes through cv2.imwrite
        :return: None
        """
        if mode == 'raw':
            img.tofile(save_img_path)
        else:
            cv2.imwrite(save_img_path, img)

    @staticmethod
    def findFilesWithPathlibRecursive(directory,
                                      pattern1: str,
                                      pattern2: str):
        """Recursively list files whose name contains BOTH substrings.

        :param directory: root directory to search
        :param pattern1: first required substring of the file name
        :param pattern2: second required substring of the file name
        :return: list of matching file paths as strings
        """
        return [str(p) for p in Path(directory).rglob('*')
                if p.is_file() and pattern1 in p.name and pattern2 in p.name]

    @staticmethod
    def findFilesWithGlob(directory, pattern):
        """Recursively glob for ``pattern`` below ``directory``.

        :param directory: root directory to search
        :param pattern: glob pattern, e.g. '*.raw'
        :return: list of matching file paths
        """
        search_pattern = os.path.join(directory, '**', pattern)
        return glob.glob(search_pattern, recursive=True)

    @staticmethod
    def saveDictToCsv(data_dict,
                      filename,
                      headers):
        """Write ``{distance: {column: value}}`` data to a CSV file.

        :param data_dict: mapping of distance -> per-column value dict
        :param filename: destination CSV path
        :param headers: column names; headers[0] labels the distance column
        :return: None
        """
        with open(filename, mode='w', newline='') as file:
            writer = csv.writer(file)
            writer.writerow(headers)  # title row

            for distance, data in data_dict.items():
                # headers[0] is the distance column itself; remaining
                # headers are looked up in the per-distance dict and
                # missing keys become None (empty CSV cell).
                writer.writerow([distance] + [data.get(header) for header in headers[1:]])

    @staticmethod
    def saveDictToJson(data, file_path):
        """Dump ``data`` to a UTF-8 JSON file (indent=4, non-ASCII kept).

        :param data: JSON-serializable object
        :param file_path: destination path
        :return: None; I/O and serialization errors are printed, not raised
        """
        try:
            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=4)
            print(f"Dictionary has been saved successfully to {file_path}")
        except IOError as e:
            print(f"Error saving dictionary to {file_path}: {e}")
        except TypeError as e:
            print(f"Type error in the dictionary data: {e}")

    @staticmethod
    def get_specific_line(filename, line_number):
        """Return (stripped) line ``line_number`` (1-based) of ``filename``.

        linecache returns '' for out-of-range lines, so this never raises.
        """
        line = linecache.getline(filename, line_number)
        return line.strip()

    # Using file iteration (buffered, line by line)
    @staticmethod
    def read_file_line_by_line(filename):
        """Print each line of ``filename`` stripped of whitespace."""
        with open(filename, 'r') as file:
            for line in file:
                print(line.strip())

    # Using readlines()
    @staticmethod
    def read_all_lines_at_once(filename):
        """Return all lines of ``filename`` as a list of stripped strings."""
        with open(filename, 'r') as file:
            lines = file.readlines()
        return [line.strip() for line in lines]

    @staticmethod
    def filter_depth_image_edge(img, threshold=200.0):
        """Zero out 'flying pixels' whose Sobel gradient magnitude exceeds
        ``threshold``.

        NOTE: ``img`` is modified in place; the return value is the same
        array, provided for call-chaining convenience.
        """
        # Calculate gradients along x and y directions
        grad_x = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        grad_y = cv2.Sobel(img, cv2.CV_32F, 0, 1)

        # Compute gradient magnitude
        grad = cv2.magnitude(grad_x, grad_y)

        # Flying pixels: gradient magnitude above the threshold
        mask = grad > threshold

        # Set flying pixels to 0 (in place)
        img[mask] = 0

        return img

    @staticmethod
    def plot_point_cloud_side_view(point_cloud, base_dis, cam_dis, delta, title: str, ax, view='XZ', en_single_line=True):
        """
        Plots side views of the point cloud.

        Args:
        point_cloud (np.ndarray): Nx3 array containing the point cloud (x, y, z).
        base_dis (int): Base distance for the reference line.
        cam_dis (int): Camera distance for the reference line.
        delta (int): Delta value for drawing uncertainty lines around the base and camera distances.
        title (str): Title drawn above the axes.
        ax (matplotlib.axes.Axes): Axes object to draw the plot.
        view (str): Determines the plane to view ('YZ' for Y-Z plane, 'XZ' for X-Z plane).
        en_single_line (bool): If True only camera-distance lines are drawn;
            otherwise base-distance lines are drawn as well.

        Raises:
        ValueError: if ``view`` is neither 'YZ' nor 'XZ'.
        """
        # Extracting coordinates
        x, y, z = point_cloud[:, 0], point_cloud[:, 1], point_cloud[:, 2]

        # Scatter the chosen plane, colour-coded by depth.
        if view == 'YZ':
            scatter = ax.scatter(y, z, c=z, cmap='viridis', marker='.')
            ax.set_xlabel('Y (mm)', fontsize=10)
            ax.set_ylabel('Z (mm)', fontsize=10)
        elif view == 'XZ':
            scatter = ax.scatter(x, z, c=z, cmap='viridis', marker='.')
            ax.set_xlabel('X (mm)', fontsize=10)
            ax.set_ylabel('Z (mm)', fontsize=10)
        else:
            raise ValueError("Invalid view specified. Use 'YZ' or 'XZ'.")

        ax.grid(True, which='both', linestyle='--', linewidth=0.5, color='gray', alpha=0.5)

        cbar = plt.colorbar(scatter, ax=ax)
        cbar.set_label('Depth (z)', fontsize=10)

        # Drawing horizontal reference lines at specified z distances
        if en_single_line:
            z_conditions = [cam_dis, cam_dis - delta, cam_dis + delta]
            colors = ['blue', 'red', 'red']
            linestyles = ['--', '-.', '-.']
            labels = ['CameraDis', 'CameraLowTh', 'CameraUpTh']
        else:
            z_conditions = [base_dis, cam_dis, base_dis - delta, base_dis + delta, cam_dis - delta, cam_dis + delta]
            colors = ['blue', 'blue', 'red', 'red', 'red', 'red']
            linestyles = ['--', '--', '-', '-', '-.', '-.']
            labels = ['BaseDis', 'CameraDis', 'BaseLowTh', 'BaseUpTh', 'CameraLowTh', 'CameraUpTh']

        for z_cond, color, linestyle, label in zip(z_conditions, colors, linestyles, labels):
            ax.axhline(y=z_cond, color=color, linestyle=linestyle, label=label)

        ax.set_title(title, fontsize=11)
        ax.legend(fontsize=8)
        plt.tight_layout(pad=0.5)

    @staticmethod
    def calculate_hole_rate(depth_image):
        """Fraction of zero-valued ('hole') pixels in ``depth_image``.

        :return: ratio in [0, 1]; 0.0 for an empty array (avoids division
                 by zero)
        """
        total_pixels = depth_image.size
        if total_pixels == 0:
            return 0.0
        return np.count_nonzero(depth_image == 0) / total_pixels

    @staticmethod
    def disp_to_depth(disp, Q23, Q32, Q33, sub_pixel_value=64, zoom_ratio=1.0, max_dis=3000):
        """Convert a disparity map to a uint16 depth map, pixel by pixel.

        :param disp: disparity image; zero disparity maps to zero depth
        :param Q23, Q32, Q33: reprojection-matrix coefficients
        :param sub_pixel_value: disparity sub-pixel scale factor
        :param zoom_ratio: depth scale factor
        :param max_dis: depths above this (after rounding) are zeroed
        :return: uint16 depth image, or None if ``disp`` is None
                 (previously fell through and crashed on disp.shape)
        """
        if disp is None:
            print(" [ERROR] Check input!")
            return None

        height, width = disp.shape
        depth = np.zeros_like(disp, dtype=np.uint16)

        for j in range(height):
            for i in range(width):
                disp_value = disp[j, i]
                if disp_value == 0:
                    continue  # depth already initialised to 0
                depth_value = (Q23 * zoom_ratio * sub_pixel_value) / (Q32 * disp_value + Q33 * sub_pixel_value)
                depth[j, i] = np.uint16(depth_value + 0.5)  # round to nearest
                if depth[j, i] > max_dis:
                    depth[j, i] = 0

        return depth

    @staticmethod
    def disp_to_depth_vec(disp, Q23, Q32, Q33, sub_pixel_value=64, zoom_ratio=1.0, max_dis=3000):
        """Vectorized variant of :meth:`disp_to_depth`.

        NOTE: the max_dis comparison happens on the float value (before the
        uint16 cast), whereas the loop version compares after casting, so
        results can differ by one count exactly at the boundary.

        :return: uint16 depth image, or None if ``disp`` is None
                 (previously fell through and crashed)
        """
        if disp is None:
            print(" [ERROR] Check input!")
            return None

        depth = np.where(disp == 0, 0,
                         (Q23 * zoom_ratio * sub_pixel_value) / (Q32 * disp + Q33 * sub_pixel_value) + 0.5)
        depth = np.where(depth > max_dis, 0, depth).astype(np.uint16)

        return depth

    @staticmethod
    def colorizer_depth_image(depth_img, max_dis=3000):
        """Render a depth image as an equalized Jet pseudo-colour RGB image.

        :param depth_img: depth image (any integer/float ndarray)
        :param max_dis: depth corresponding to full scale (255)
        :return: HxWx3 uint8 RGB image
        """
        # Find the minimum depth value
        min_dis = np.min(depth_img)

        # Scaling factor mapping [0, max_dis] onto [0, 255]
        fac = 255.0 / max_dis

        # Scale and clip: without the clip, depths above max_dis would wrap
        # around in the uint8 cast below and produce false colours.
        dis = np.clip((depth_img - min_dis) * fac, 0, 255)

        # Apply histogram equalization
        dis = cv2.equalizeHist(dis.astype(np.uint8))

        # Apply the Jet colormap
        pseudo_img = cv2.applyColorMap(dis, cv2.COLORMAP_JET)

        # Convert BGR to RGB
        pseudo_img = cv2.cvtColor(pseudo_img, cv2.COLOR_BGR2RGB)

        return pseudo_img

    @staticmethod
    def save_ply(file_name, points):
        """Write an Nx6 (x, y, z, r, g, b) array as an ASCII PLY file.

        :param file_name: destination path
        :param points: ndarray with columns x, y, z, red, green, blue
        :return: None
        """
        # Open file for writing in text mode
        with open(file_name, 'w') as f:
            # Write header
            f.write('ply\n')
            f.write('format ascii 1.0\n')
            f.write(f'element vertex {points.shape[0]}\n')
            f.write('property float x\n')
            f.write('property float y\n')
            f.write('property float z\n')
            f.write('property uchar red\n')
            f.write('property uchar green\n')
            f.write('property uchar blue\n')
            f.write('end_header\n')

            # Write body, one vertex per line
            for point in points:
                f.write(
                    f'{point[0]:.3f} {point[1]:.3f} {point[2]:.3f} {int(point[3])} {int(point[4])} {int(point[5])}\n')

    @staticmethod
    def aligned_dp_to_color_point_cloud(rgb, depth, rgbCamParam, depth_ratio=1.0, dis_near=100, dis_far=3000):
        """Back-project an RGB-aligned depth map into a coloured point cloud.

        :param rgb: HxWx3 colour image, channel order BGR (channel 2 is
                    stored as red below)
        :param depth: HxW depth image aligned to ``rgb``
        :param rgbCamParam: dict with intrinsics 'fx', 'fy', 'cx', 'cy'
        :param depth_ratio: depth unit scale; output z = depth / depth_ratio
        :param dis_near: nearest valid depth (in raw depth units, pre-scale)
        :param dis_far: farthest valid depth (in raw depth units, pre-scale)
        :return: Nx6 float32 array of (x, y, z, r, g, b)
        """
        fx = rgbCamParam['fx']
        fy = rgbCamParam['fy']
        cx = rgbCamParam['cx']
        cy = rgbCamParam['cy']

        # Express the validity window in raw depth units
        dis_near *= depth_ratio
        dis_far *= depth_ratio

        height, width = depth.shape
        ratio = 1 / depth_ratio

        # Create meshgrid for pixel coordinates
        x = np.arange(width)
        y = np.arange(height)
        xv, yv = np.meshgrid(x, y)

        # Flatten the arrays for easier processing
        xv = xv.flatten()
        yv = yv.flatten()
        depth_flat = depth.flatten()

        # Filter points based on depth range
        valid = (depth_flat >= dis_near) & (depth_flat <= dis_far)
        xv = xv[valid]
        yv = yv[valid]
        depth_flat = depth_flat[valid]

        # Pinhole back-projection
        z = depth_flat * ratio
        x = (xv - cx) * z / fx
        y = (yv - cy) * z / fy

        # Keep only colours of valid depth pixels
        rgb_flat = rgb.reshape(-1, 3)
        rgb_flat = rgb_flat[valid]

        # Assemble the point cloud; swap channels BGR -> RGB
        points = np.zeros((len(z), 6), dtype=np.float32)
        points[:, 0] = x
        points[:, 1] = y
        points[:, 2] = z
        points[:, 3] = rgb_flat[:, 2]  # r
        points[:, 4] = rgb_flat[:, 1]  # g
        points[:, 5] = rgb_flat[:, 0]  # b

        return points

    @staticmethod
    def get_file_hash(file_path, algo='md5'):
        """Hex digest of a file, read in 8 KiB chunks.

        :param file_path: file to hash
        :param algo: any algorithm name accepted by hashlib.new; default
                     'md5' preserves the previous hard-coded behaviour
        :return: hex digest string
        """
        hash_algo = hashlib.new(algo)
        with open(file_path, 'rb') as f:
            while chunk := f.read(8192):
                hash_algo.update(chunk)
        return hash_algo.hexdigest()


class Colorizer(object):
    """Maps depth values to RGB colours via a pre-computed jet-style LUT.

    The LUT is built once at construction from piecewise-linear
    interpolation between a small set of colour control points.
    """

    def __init__(self, color_map=None, levels=4096, depth_range=(0, 65535), mode='jet_red2blue'):
        self.color_map = color_map if color_map else self.default_jet_map(mode=mode)
        self.levels = levels
        self.min_dis, self.max_dis = depth_range
        self.cache = self.generate_cache()

    @staticmethod
    def default_jet_map(mode='jet_red2blue'):
        """Return one of the two built-in jet control-point tables."""
        # Red-to-blue ramp: dark red -> red -> yellow -> green -> cyan -> blue -> dark blue
        red_to_blue = {
            0.00: np.array([0.5, 0, 0]),   # dark red
            0.11: np.array([1, 0, 0]),     # red
            0.35: np.array([1, 1, 0]),     # yellow
            0.50: np.array([0, 1, 0]),     # green
            0.64: np.array([0, 1, 1]),     # cyan
            0.86: np.array([0, 0, 1]),     # blue
            1.00: np.array([0, 0, 0.5]),   # dark blue
        }
        # Blue-to-red ramp: mirror image of the table above
        blue_to_red = {
            0.00: np.array([0, 0, 0.5]),   # dark blue
            0.11: np.array([0, 0, 1]),     # blue
            0.35: np.array([0, 1, 1]),     # cyan
            0.50: np.array([0, 1, 0]),     # green
            0.64: np.array([1, 1, 0]),     # yellow
            0.86: np.array([1, 0, 0]),     # red
            1.00: np.array([0.5, 0, 0]),   # dark red
        }
        return red_to_blue if mode == 'jet_red2blue' else blue_to_red

    def generate_cache(self):
        """Pre-compute a (levels, 3) RGB lookup table.

        Each row is the piecewise-linear blend of the two control points
        bracketing its normalized position t in [0, 1].
        """
        lut = np.zeros((self.levels, 3))
        stops = sorted(self.color_map.keys())
        for row in range(self.levels):
            t = row / (self.levels - 1)
            for k in range(len(stops) - 1):
                lo, hi = stops[k], stops[k + 1]
                if lo <= t <= hi:
                    w = (t - lo) / (hi - lo)
                    lut[row] = (1 - w) * self.color_map[lo] + w * self.color_map[hi]
                    break
        return lut

    def clip_and_normalize(self, depth_data, clip_range=None):
        """Clip depth into ``clip_range`` and rescale it to [0, 1]."""
        if clip_range is None:
            clip_range = [self.min_dis, self.max_dis]
        lo, hi = clip_range
        clipped = np.clip(depth_data, lo, hi)
        return (clipped - lo) / (hi - lo)

    def histogram_equalization(self, normalized_depth, bins=None):
        """Equalize a normalized depth map through its histogram CDF."""
        bin_count = self.levels if bins is None else bins
        flat = normalized_depth.flatten()
        hist, edges = np.histogram(flat, bins=bin_count, range=(0, 1), density=True)
        cdf = hist.cumsum()       # cumulative distribution function
        cdf = cdf / cdf[-1]       # normalize CDF to [0, 1]
        remapped = np.interp(flat, edges[:-1], cdf)
        return remapped.reshape(normalized_depth.shape)

    def apply_color_map(self, equalized_depth):
        """Look up RGB colours for equalized depth values in the LUT."""
        idx = np.round(equalized_depth * (self.levels - 1)).astype(int)
        idx = np.clip(idx, 0, self.levels - 1)  # keep indices in range
        return self.cache[idx].reshape(equalized_depth.shape + (3,))

    def colorize(self, depth_data, clip_range, bins=65536, background_color=np.array([0, 0, 0])):
        """Full pipeline: normalize -> equalize -> colour-map.

        Zero-depth pixels are treated as background and painted with
        ``background_color``. Returns an HxWx3 uint8 image.
        """
        # Remember invalid (zero) pixels before any transformation.
        background = (depth_data == 0)
        normalized = self.clip_and_normalize(depth_data, clip_range)
        equalized = self.histogram_equalization(normalized, bins=bins)
        coloured = self.apply_color_map(equalized)
        coloured[background] = background_color
        return (coloured * 255).astype(np.uint8)

    def linear_colorize(self, depth_data, clip_range=None):
        """Linear (non-equalized) colourization; zero depth maps to black."""
        if clip_range is None:
            clip_range = [self.min_dis, self.max_dis]
        lo, hi = clip_range
        normalized = (np.clip(depth_data, lo, hi) - lo) / (hi - lo)

        background = depth_data == 0

        idx = np.round(normalized * (self.levels - 1)).astype(int)
        idx = np.clip(idx, 0, self.levels - 1)
        coloured = self.cache[idx].reshape(depth_data.shape + (3,))

        coloured[background] = np.array([0, 0, 0])  # black background

        # Scale colours into [0, 255] and convert to uint8
        return (coloured * 255).astype(np.uint8)


class EdgeDenoiser(object):
    """Removes 'flying pixel' noise along depth-image edges using
    gradient-magnitude thresholds that depend on the depth range."""

    def __init__(self):
        pass

    @staticmethod
    def findDepthImageMainRanges(depthImg, numRanges=100, bins=1000):
        """Estimate dominant depth intervals of a depth image.

        :param depthImg: 2-D depth image; zero pixels (holes) are ignored
        :param numRanges: number of candidate [min, max] intervals to return
        :param bins: number of histogram bins over the non-zero depths
        :return: list of [range_min, range_max] pairs
        """
        # Exclude zero-value (hole) regions from histogram analysis.
        depth_values = depthImg[np.nonzero(depthImg)]

        # Histogram of depth values; bin_edges has len(hist)+1 entries.
        # (Renamed from 'bins' to avoid shadowing the parameter.)
        hist, bin_edges = np.histogram(depth_values, bins=bins)

        # NOTE(review): this histograms the histogram *counts*, so the
        # selected indices rank count frequencies rather than true depth
        # peaks. Behaviour intentionally kept unchanged -- confirm intent
        # before modifying.
        peaks, _ = np.histogram(hist, bins=np.arange(0, len(hist), 1))
        peak_indices = np.argsort(peaks)[::-1][:numRanges]

        return [[bin_edges[idx], bin_edges[idx + 1]] for idx in peak_indices]

    @staticmethod
    def filterDepthImageByMainTargets(depthImg, main_ranges: list, fallback_threshold=120.0, enMedFilter=True):
        """Zero out edge pixels inside the main depth ranges.

        :param depthImg: depth image (dtype supported by cv2.medianBlur)
        :param main_ranges: list of [min, max] depth intervals to process
        :param fallback_threshold: gradient threshold selecting edge pixels
        :param enMedFilter: apply a 3x3 median filter afterwards
        :return: filtered copy of ``depthImg`` (input unchanged), or the
                 input itself when ``main_ranges`` is empty
        """
        if len(main_ranges) == 0:
            print(" [Error] Main range is empty.")
            return depthImg

        filtered_img = np.copy(depthImg)

        # Calculate gradients along x and y directions
        grad_x = cv2.Sobel(depthImg, cv2.CV_32F, 1, 0)
        grad_y = cv2.Sobel(depthImg, cv2.CV_32F, 0, 1)

        # Compute gradient magnitude
        grad = cv2.magnitude(grad_x, grad_y)

        # Candidate edge pixels: gradient above the fallback threshold.
        edge_mask = (grad > fallback_threshold)

        # Per-pixel threshold values, assigned by depth range below.
        threshold_map = np.zeros_like(depthImg, dtype=np.float32)

        # Closer targets tolerate larger gradients. The intervals overlap
        # on purpose; the first matching branch wins.
        # NOTE(review): the per-range threshold value is stored but never
        # compared against 'grad' -- only threshold_map > 0 is used below,
        # so the effective cutoff is just 'fallback_threshold'. Compare
        # with denoiseDepthEdge (grad_mag > threshold_map) and confirm
        # intent before changing.
        for range_min, range_max in main_ranges:
            if 100 <= range_min <= 400:
                threshold = 200
            elif 400 < range_min <= 800:
                threshold = 180
            elif 700 < range_min <= 1200:
                threshold = 160
            elif 1000 < range_min <= 1600:
                threshold = 140
            elif 1300 < range_min <= 2000:
                threshold = 130
            elif 1600 < range_min <= 2400:
                threshold = 120
            else:
                threshold = fallback_threshold

            # Apply the threshold only to edge pixels within this range.
            in_range = (depthImg >= range_min) & (depthImg <= range_max)
            threshold_map[in_range & edge_mask] = threshold

        # Any pixel that received a threshold is treated as a flying pixel.
        filtered_img[threshold_map > 0] = 0

        # Optional median filter to clean up residual speckle
        if enMedFilter:
            filtered_img = cv2.medianBlur(filtered_img, 3)

        return filtered_img

    @staticmethod
    def denoiseDepthEdgeByHist(img, numRanges=100, bins=1000, threshold=100, enMedFilter=True):
        """Histogram-driven variant: find main ranges, then filter them.

        :return: filtered copy of ``img``
        """
        # Static calls -- no need to instantiate EdgeDenoiser.
        main_ranges = EdgeDenoiser.findDepthImageMainRanges(img, numRanges, bins)
        filtered_img = EdgeDenoiser.filterDepthImageByMainTargets(
            img, main_ranges, fallback_threshold=threshold, enMedFilter=enMedFilter)

        return filtered_img

    @staticmethod
    def denoiseDepthEdge(img, depth_range: list, threshold_range: list, enMedFilter=True):
        """Remove flying pixels using per-depth-range gradient thresholds.

        :param img: depth image (dtype supported by cv2.medianBlur)
        :param depth_range: list of (min, max) depth intervals
        :param threshold_range: one gradient threshold per interval
        :param enMedFilter: apply a 3x3 median filter afterwards
        :return: filtered copy of ``img`` (input unchanged)
        """
        if len(depth_range) == 0 or len(threshold_range) == 0:
            # Built-in defaults covering 0.1 m .. 2 m.
            depth_range = [(100, 500), (500, 1000), (1000, 1500), (1500, 2000)]
            threshold_range = [200, 180, 160, 140]

        filtered_img = np.copy(img)

        # Calculate gradients along x and y directions
        grad_x = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        grad_y = cv2.Sobel(img, cv2.CV_32F, 0, 1)

        # Compute gradient magnitude
        grad_mag = np.sqrt(grad_x ** 2 + grad_y ** 2)

        # Per-pixel threshold map filled range by range
        threshold_map = np.zeros_like(img, dtype=np.float32)

        # zip() guards against mismatched list lengths (previously an
        # IndexError when threshold_range was shorter than depth_range).
        for (range_min, range_max), threshold in zip(depth_range, threshold_range):
            mask = (img >= range_min) & (img <= range_max)
            threshold_map[mask] = threshold

        # Fixed threshold for the far 2-3 m band
        far_mask = (img >= 2000) & (img <= 3000)
        threshold_map[far_mask] = 200

        # Flying pixels: gradient magnitude exceeds the local threshold
        filtered_img[grad_mag > threshold_map] = 0

        if enMedFilter:
            filtered_img = cv2.medianBlur(filtered_img, 3)

        return filtered_img

    @staticmethod
    def expand_edges_and_analyze(depth_img):
        """Smooth a band around depth edges with the local mean.

        Sobel gradients locate edges, the binary edge mask is dilated with
        a 5x5 kernel, and pixels inside the dilated mask are replaced by
        the 5x5 local mean of the depth image.

        :param depth_img: depth image
        :return: new image; non-edge pixels are copied from the input
        """
        # Locate edges via Sobel gradient magnitude
        grad_x = cv2.Sobel(depth_img, cv2.CV_32F, 1, 0)
        grad_y = cv2.Sobel(depth_img, cv2.CV_32F, 0, 1)
        grad_mag = cv2.magnitude(grad_x, grad_y)

        # Threshold the magnitude into a binary edge mask
        _, edge_mask = cv2.threshold(grad_mag, 50, 255, cv2.THRESH_BINARY)
        edge_mask = edge_mask.astype(np.uint8)

        # Dilate edges so the neighbourhood around them is covered too
        kernel = np.ones((5, 5), np.uint8)
        dilated_edges = cv2.dilate(edge_mask, kernel, iterations=1)

        # Replace pixels inside the dilated edge band with the local mean.
        # (The previous local-stddev computation was dead code -- its result
        # was never used, and the unsigned subtraction risked underflow --
        # so it has been removed.)
        mean_filter_size = 5
        local_mean = cv2.blur(depth_img, (mean_filter_size, mean_filter_size))

        return np.where(dilated_edges == 255, local_mean, depth_img)
