#!/usr/bin/env python

# Copyright (c) 2024 Óscar Pons Fernández <oscarpf22@gmail.com>
# Copyright (c) 2024 Alberto J. Tudela Roldán <ajtudela@gmail.com>
# Copyright (c) 2024 Grupo Avispa, DTE, Universidad de Málaga
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Python
import time
import numpy as np
import torch
import cv2
from cv_bridge import CvBridge, CvBridgeError

# ROS 2
import rclpy
from rclpy.node import Node
from rclpy.qos import QoSProfile, QoSDurabilityPolicy, QoSHistoryPolicy, QoSReliabilityPolicy
from sensor_msgs.msg import Image, CompressedImage

# DepthAnything V2
from depth_anything_v2.depth_anything_v2.dpt import DepthAnythingV2


class DepthAnythingROS(Node):
    """ROS 2 node that estimates depth from RGB images using DepthAnything V2.

    Subscribes to a compressed RGB image topic, runs monocular depth
    estimation and publishes the result as a single-channel 32-bit float
    (32FC1) depth image.

    Parameters
    ----------
        image_topic : str
            Topic where the image will be subscribed.
        depth_image_topic : str
            Topic where the raw depth image will be published.
        device : str
            Device to use for the inference (cpu or cuda).
        model_file : str
            Path to the model.
        encoder : str
            Encoder to use for the model (vits, vitb or vitl).

    Subscribers
    ----------
        image_topic : sensor_msgs.msg.CompressedImage
            Compressed image topic where the rgb image will be subscribed.

    Publishers
    ----------
        depth : sensor_msgs.msg.Image
            Image topic where the depth image will be published in 32FC1 format.

    Raises
    ------
        ValueError
            If the ``encoder`` parameter is not one of vits, vitb, vitl.
    """

    # DPT head configuration for each supported DINOv2 encoder size.
    ENCODER_CONFIGS = {
        'vits': {'features': 64, 'out_channels': [48, 96, 192, 384]},
        'vitb': {'features': 128, 'out_channels': [96, 192, 384, 768]},
        'vitl': {'features': 256, 'out_channels': [256, 512, 1024, 1024]},
    }

    def __init__(self):
        super().__init__('depth_anything')
        self.bridge = CvBridge()

        # Get parameters
        self.get_params()

        # Fall back to CPU when a GPU device was requested but CUDA is
        # not available on this machine.
        if self.device != 'cpu' and not torch.cuda.is_available():
            self.get_logger().info(f'Device could not be set to: [{self.device}] ...')
            self.device = "cpu"
        self.get_logger().info(f'Setting device to: [{self.device}]')

        # Model initialization.
        # BUG FIX: the original log message began with a stray 'n' (a lost
        # '\n'), and on an invalid encoder execution continued with
        # self.model undefined, crashing later with an opaque
        # AttributeError. Fail fast with a clear error instead.
        config = self.ENCODER_CONFIGS.get(self.encoder)
        if config is None:
            self.get_logger().error(
                f'Wrong type of encoder: [{self.encoder}]. Must be vits, vitb or vitl')
            raise ValueError(f'Invalid encoder: [{self.encoder}]. Must be vits, vitb or vitl')
        self.model = DepthAnythingV2(encoder=self.encoder, **config)

        # Load checkpoint weights (re-raises on failure), then move the
        # model to the selected device in inference mode.
        self._load_model_weights()
        self.model.to(self.device).eval()

        # Sensor-data QoS: best effort, keep only the newest frame.
        sensor_qos_profile = QoSProfile(
            durability=QoSDurabilityPolicy.VOLATILE,
            reliability=QoSReliabilityPolicy.BEST_EFFORT,
            history=QoSHistoryPolicy.KEEP_LAST,
            depth=1)
        self.depth_image_pub = self.create_publisher(
            Image, self.depth_image_topic, sensor_qos_profile)

        # Create subscribers
        self.image_sub = self.create_subscription(
            CompressedImage, self.image_topic, self.image_callback, sensor_qos_profile)

    def _load_model_weights(self) -> None:
        """Load checkpoint weights from ``self.model_file`` into ``self.model``.

        Uses a non-strict load so partially matching checkpoints still work,
        logging diagnostics about missing/unexpected keys. Re-raises any
        loading failure after logging it.
        """
        try:
            state_dict = torch.load(self.model_file, map_location=self.device)
            self.get_logger().info(f'Loading state dict with {len(state_dict)} keys')

            # Debug: show some key patterns in the state dict
            checkpoint_keys = [k for k in state_dict.keys() if 'depth_head.projects' in k]
            self.get_logger().info(f'Found {len(checkpoint_keys)} depth_head.projects keys in state dict')
            for key in checkpoint_keys[:3]:  # Show first 3
                self.get_logger().info(f'  State dict has: {key}')

            # Debug: show current model architecture
            current_model_keys = [k for k in self.model.state_dict() if 'depth_head.projects' in k]
            self.get_logger().info(f'Current model has {len(current_model_keys)} depth_head.projects keys')
            for key in current_model_keys[:3]:  # Show first 3
                self.get_logger().info(f'  Current model has: {key}')

            # Non-strict load: collect mismatches instead of raising.
            missing_keys, unexpected_keys = self.model.load_state_dict(state_dict, strict=False)

            self._log_key_mismatches('Missing', missing_keys)
            self._log_key_mismatches('Unexpected', unexpected_keys)

            if not missing_keys and not unexpected_keys:
                self.get_logger().info('Model loaded successfully with all keys matching')
            else:
                self.get_logger().warn('Model loaded but with key mismatches - this may affect performance')

        except Exception as e:
            self.get_logger().error(f'Failed to load model: {e}')
            raise

    def _log_key_mismatches(self, label: str, keys: list) -> None:
        """Warn about up to 5 mismatched state-dict keys plus a count of the rest.

        Args:
            label: 'Missing' or 'Unexpected' (used verbatim in the log text).
            keys: The mismatched key names reported by load_state_dict.
        """
        if not keys:
            return
        self.get_logger().warn(f'{label} keys: {len(keys)} keys are {label.lower()}')
        for key in keys[:5]:  # Show first 5
            self.get_logger().warn(f'  {label}: {key}')
        if len(keys) > 5:
            self.get_logger().warn(f'  ... and {len(keys) - 5} more {label.lower()} keys')

    def get_params(self) -> None:
        """Get the parameters from the parameter server.
        """
        self.image_topic = self._get_str_param('image_topic', '/image')
        self.depth_image_topic = self._get_str_param('depth_image_topic', 'depth')
        self.device = self._get_str_param('device', 'cuda:0')
        self.model_file = self._get_str_param(
            'model_file', '/home/sunrise/data/model/depth_anything_v2_vits.pth')
        self.encoder = self._get_str_param('encoder', 'vits')

    def _get_str_param(self, name: str, default: str) -> str:
        """Declare a string parameter, log its value and return it.

        Args:
            name: Parameter name.
            default: Value used when the parameter is not set externally.

        Returns:
            The parameter's string value.
        """
        self.declare_parameter(name, default)
        value = self.get_parameter(name).get_parameter_value().string_value
        self.get_logger().info(f'The parameter {name} is set to: [{value}]')
        return value

    def image_callback(self, image_msg: CompressedImage) -> None:
        """Run depth inference on the incoming image and publish the result.

        Args:
            image_msg (CompressedImage): Compressed image message.
        """

        # Skip decoding and inference entirely when nobody listens.
        if self.depth_image_pub.get_subscription_count() == 0:
            return

        self.get_logger().info(
            f'Subscribed to depth image topic: [{self.depth_image_topic}]', once=True)

        # Convert ROS CompressedImage to OpenCV Image
        try:
            self.current_image = self.bridge.compressed_imgmsg_to_cv2(image_msg, "bgr8")
        except CvBridgeError as e:
            self.get_logger().error(f'Could not convert compressed image to OpenCV: {e}')
            return

        # BUG FIX: start_time was assigned twice in the original; only this
        # post-decode assignment was ever read, so keep the single one.
        start_time = time.time()

        # Perform inference
        depth_raw = self.model.infer_image(self.current_image)

        # Process depth data using algorithm for 32FC1 format
        depth_32fc1 = self.process_depth_32fc1(depth_raw)

        execution_time = time.time() - start_time
        self.get_logger().info(f"Depth estimation took: {execution_time * 1000:.1f} ms")

        # Convert to ROS Image message with 32FC1 encoding and publish it,
        # stamping it with the source image's header for frame/time coherence.
        try:
            ros_image = self.bridge.cv2_to_imgmsg(depth_32fc1, "32FC1", image_msg.header)
            self.depth_image_pub.publish(ros_image)
        except CvBridgeError as e:
            self.get_logger().error(f"Could not convert to 32FC1: {e}")

    def process_depth_32fc1(self, depth_raw):
        """Process raw depth data using algorithm for 32FC1 format.

        Pipeline: outlier clamping, bilateral + median filtering, logarithmic
        normalization into a 0.5-50.0 range, and a final Gaussian smoothing.
        NOTE(review): the output range is nominally meters, but the model
        emits relative depth — confirm metric interpretation with consumers.

        Args:
            depth_raw: Raw depth numpy array from model inference

        Returns:
            Processed depth array in 32FC1 format (32-bit float, single channel)
        """
        try:
            # Ensure depth data is float32 and contiguous
            depth_processed = np.array(depth_raw, dtype=np.float32)

            # 1. Remove outliers using statistical filtering: clamp values
            # beyond 3 standard deviations instead of removing them, so the
            # image dimensions are preserved.
            mean_depth = np.mean(depth_processed)
            std_depth = np.std(depth_processed)
            lower_bound = mean_depth - 3 * std_depth
            upper_bound = mean_depth + 3 * std_depth
            depth_processed = np.clip(depth_processed, lower_bound, upper_bound)

            # 2. Apply bilateral filter for noise reduction while preserving edges
            if len(depth_processed.shape) == 2:
                d = 5  # Neighborhood diameter
                sigma_color = 50  # Filter sigma in the color space
                sigma_space = 50  # Filter sigma in the coordinate space
                depth_processed = cv2.bilateralFilter(depth_processed.astype(np.float32),
                                                    d, sigma_color, sigma_space)

            # 3. Apply median filter for additional noise reduction.
            # medianBlur needs uint8 input, so normalize to 0-255, filter,
            # and rescale back to the original value range.
            if len(depth_processed.shape) == 2:
                depth_temp = ((depth_processed - depth_processed.min()) /
                             (depth_processed.max() - depth_processed.min() + 1e-8) * 255).astype(np.uint8)
                depth_temp = cv2.medianBlur(depth_temp, 3)
                depth_processed = (depth_temp.astype(np.float32) / 255.0 *
                                 (depth_processed.max() - depth_processed.min()) + depth_processed.min())

            # 4. Normalize depth values to the 0.5-50.0 range using the 1st
            # and 99th percentiles (robust against residual outliers) and a
            # logarithmic scaling for better depth distribution.
            depth_min = np.percentile(depth_processed, 1)
            depth_max = np.percentile(depth_processed, 99)
            if depth_max > depth_min:
                depth_log = np.log(depth_processed + 1e-6)  # Avoid log(0)
                depth_log_min = np.log(depth_min + 1e-6)
                depth_log_max = np.log(depth_max + 1e-6)
                depth_normalized = 0.5 + (depth_log - depth_log_min) / (depth_log_max - depth_log_min) * 49.5
            else:
                # Degenerate (flat) depth map: nothing to normalize.
                depth_normalized = depth_processed

            # 5. Apply Gaussian smoothing for final refinement
            depth_final = cv2.GaussianBlur(depth_normalized.astype(np.float32), (3, 3), 0)

            # Ensure output is a 2-D, contiguous float32 array (32FC1).
            if len(depth_final.shape) != 2:
                depth_final = depth_final.squeeze()
            depth_final = np.ascontiguousarray(depth_final, dtype=np.float32)

            # Log depth statistics for monitoring
            self.get_logger().debug(f"Depth 32FC1 - Min: {depth_final.min():.3f}, "
                                  f"Max: {depth_final.max():.3f}, Mean: {depth_final.mean():.3f}")

            return depth_final

        except Exception as e:
            # Deliberate best-effort fallback: publish the raw prediction
            # rather than dropping the frame.
            self.get_logger().error(f"Error processing depth for 32FC1: {e}")
            return np.ascontiguousarray(depth_raw.astype(np.float32))

def main(args=None):
    """Initialize rclpy, spin the DepthAnythingROS node, then shut down."""
    rclpy.init(args=args)
    depth_node = DepthAnythingROS()
    rclpy.spin(depth_node)

    # Explicit teardown; the garbage collector would otherwise destroy the
    # node automatically when the object is collected.
    depth_node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()