import os
import tempfile
import ffmpeg
import folder_paths
from PIL import Image, ImageSequence
import numpy as np
import torch


class VideoCropNode:
    """
    A ComfyUI node for cropping videos using FFmpeg with pixel-precise control.

    The incoming IMAGE tensor (shape [batch, height, width, channels], float
    values in [0, 1]) is written to a temporary H.264/MP4 video, cropped with
    FFmpeg's ``crop`` filter, re-encoded with the requested codec/container,
    and decoded back into an IMAGE tensor of the cropped frames.
    """
    
    @classmethod
    def INPUT_TYPES(cls):
        """Declare the node's input sockets and widget settings for ComfyUI."""
        return {
            "required": {
                "video": ("IMAGE",),
                "x": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 4096,
                    "step": 1,
                    "display": "number"
                }),
                "y": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 4096,
                    "step": 1,
                    "display": "number"
                }),
                "width": ("INT", {
                    "default": 512,
                    "min": 1,
                    "max": 4096,
                    "step": 1,
                    "display": "number"
                }),
                "height": ("INT", {
                    "default": 512,
                    "min": 1,
                    "max": 4096,
                    "step": 1,
                    "display": "number"
                }),
                "output_format": (["mp4", "webm", "avi", "mov"],),
                "codec": (["libx264", "libx265", "libvpx-vp9", "prores"],),
                "quality": ("INT", {
                    "default": 23,
                    "min": 0,
                    "max": 51,
                    "step": 1,
                    "display": "number"
                }),
            },
            "optional": {
                "fps": ("FLOAT", {
                    "default": 30.0,
                    "min": 1.0,
                    "max": 120.0,
                    "step": 0.1,
                    "display": "number"
                }),
                "extra_ffmpeg_args": ("STRING", {
                    "multiline": True,
                    "default": "",
                    "placeholder": "Additional FFmpeg arguments (e.g., -preset fast)"
                }),
            }
        }
    
    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("cropped_video",)
    FUNCTION = "crop_video"
    CATEGORY = "video/processing"
    OUTPUT_NODE = True
    
    def crop_video(self, video, x, y, width, height, output_format="mp4", codec="libx264", quality=23, fps=30.0, extra_ffmpeg_args=""):
        """
        Crop video frames using FFmpeg with the specified parameters.

        Args:
            video: IMAGE tensor of shape [batch, height, width, channels].
            x, y: Top-left corner of the crop rectangle (clamped to the frame).
            width, height: Crop size (clamped to the frame and rounded down to
                even values, since yuv420p-based encoders reject odd sizes).
            output_format: Container for the re-encoded result.
            codec: Video codec used for the re-encode.
            quality: CRF value for CRF-capable codecs (ignored for prores).
            fps: Frame rate for both the intermediate and the output video.
            extra_ffmpeg_args: Whitespace-separated "-key value" pairs passed
                through to FFmpeg (a trailing valueless flag is ignored).

        Returns:
            One-tuple with the cropped frames as an IMAGE tensor.

        Raises:
            Exception: Wrapping any FFmpeg or processing failure (with the
                original error chained as __cause__).
        """
        try:
            # All intermediate artifacts live in a self-cleaning temp dir.
            with tempfile.TemporaryDirectory() as temp_dir:
                # The intermediate file is ALWAYS H.264/MP4, independent of the
                # requested output container: _tensor_to_video encodes with
                # libx264, which is not a legal codec for e.g. a .webm
                # container, so naming the intermediate after output_format
                # would make the first encode fail.
                input_video_path = os.path.join(temp_dir, "input_video.mp4")
                output_video_path = os.path.join(temp_dir, f"cropped_video.{output_format}")
                
                # Clamp the crop rectangle to the actual frame: FFmpeg's crop
                # filter errors out if the area extends past the input bounds.
                frame_height, frame_width = int(video.shape[1]), int(video.shape[2])
                x = max(0, min(int(x), frame_width - 1))
                y = max(0, min(int(y), frame_height - 1))
                width = min(int(width), frame_width - x)
                height = min(int(height), frame_height - y)
                
                # yuv420p-based encoders (libx264/libx265/libvpx-vp9) reject
                # odd dimensions; round the crop size down to even values.
                width = max(2, width - (width % 2))
                height = max(2, height - (height % 2))
                
                # Convert ComfyUI image tensor to the intermediate video file.
                self._tensor_to_video(video, input_video_path, fps)
                
                # Build the crop pipeline.
                input_stream = ffmpeg.input(input_video_path)
                cropped_stream = ffmpeg.crop(input_stream, x, y, width, height)
                
                # Configure output with codec and quality settings.
                output_args = {"vcodec": codec, "r": fps}
                
                if codec in ("libx264", "libx265"):
                    output_args["crf"] = quality
                elif codec == "libvpx-vp9":
                    # VP9 constant-quality mode requires an explicit "-b:v 0"
                    # alongside the CRF value.
                    output_args["crf"] = quality
                    output_args["b:v"] = "0"
                elif codec == "prores":
                    output_args["profile:v"] = "2"  # ProRes 422 (standard)
                
                # Fold user-provided "-key value" pairs into the output args.
                # A lone trailing flag with no value is dropped, as before.
                if extra_ffmpeg_args.strip():
                    extra_args = extra_ffmpeg_args.strip().split()
                    for key_token, value in zip(extra_args[::2], extra_args[1::2]):
                        output_args[key_token.lstrip('-')] = value
                
                output_stream = ffmpeg.output(cropped_stream, output_video_path, **output_args)
                
                # Run the FFmpeg process.
                ffmpeg.run(output_stream, overwrite_output=True, quiet=True)
                
                # Convert output video back to tensor.
                output_tensor = self._video_to_tensor(output_video_path)
                
                return (output_tensor,)
                
        except ffmpeg.Error as e:
            raise Exception(f"FFmpeg error: {e.stderr.decode() if e.stderr else str(e)}") from e
        except Exception as e:
            raise Exception(f"Video cropping failed: {str(e)}") from e
    
    def _tensor_to_video(self, tensor, output_path, fps):
        """
        Write a ComfyUI image tensor to a video file via a PNG frame sequence.

        Args:
            tensor: [batch, height, width, channels] tensor; float tensors are
                assumed to be in [0, 1] and are scaled to 8-bit.
            output_path: Destination video file (encoded H.264 / yuv420p).
            fps: Frame rate of the generated video.

        Raises:
            ValueError: If the tensor is not 4-dimensional.
        """
        if len(tensor.shape) != 4:
            raise ValueError(f"Expected 4D tensor, got {len(tensor.shape)}D")
        
        batch_size, height, width, channels = tensor.shape
        
        # Frames are staged as numbered PNGs next to the output file so the
        # image2 demuxer can pick them up as a sequence.
        frame_dir = os.path.dirname(output_path)
        frame_pattern = os.path.join(frame_dir, "frame_%06d.png")
        
        for i in range(batch_size):
            frame = tensor[i]
            # Convert from [0,1] float to [0,255] uint8.
            if frame.dtype == torch.float32 or frame.dtype == torch.float64:
                frame = (frame * 255).clamp(0, 255).byte()
            
            frame_np = frame.cpu().numpy() if isinstance(frame, torch.Tensor) else frame
            if frame_np.dtype != np.uint8:
                frame_np = frame_np.astype(np.uint8)
            
            # Pick the PIL mode matching the channel count.
            if channels == 3:
                img = Image.fromarray(frame_np, 'RGB')
            elif channels == 4:
                img = Image.fromarray(frame_np, 'RGBA')
            else:
                img = Image.fromarray(frame_np.squeeze(), 'L')
            
            img.save(frame_pattern % (i + 1))
        
        # Encode the PNG sequence.  yuv420p requires even dimensions, so the
        # scale expression rounds odd frame sizes down by one pixel — this
        # guard belongs here on the encode, not on the PNG decode side.
        input_stream = ffmpeg.input(frame_pattern, framerate=fps)
        even_stream = ffmpeg.filter(input_stream, 'scale', 'trunc(iw/2)*2', 'trunc(ih/2)*2')
        output_stream = ffmpeg.output(
            even_stream,
            output_path,
            vcodec='libx264',
            pix_fmt='yuv420p'
        )
        ffmpeg.run(output_stream, overwrite_output=True, quiet=True)
    
    def _video_to_tensor(self, video_path):
        """
        Decode a video file back into a ComfyUI image tensor.

        Args:
            video_path: Path to the video to decode.

        Returns:
            Float tensor of shape [frames, height, width, 3] in [0, 1].

        Raises:
            Exception: If no frames could be extracted.
        """
        frame_dir = os.path.dirname(video_path)
        frame_pattern = os.path.join(frame_dir, "output_frame_%06d.png")
        
        # Dump every frame as a PNG.  No scale filter is applied here: PNG
        # output has no even-dimension constraint, and rescaling would
        # silently alter the cropped frames.
        input_stream = ffmpeg.input(video_path)
        output_stream = ffmpeg.output(input_stream, frame_pattern)
        ffmpeg.run(output_stream, overwrite_output=True, quiet=True)
        
        # Load the numbered frames back until the sequence ends.
        frames = []
        frame_index = 1
        while True:
            frame_path = frame_pattern % frame_index
            if not os.path.exists(frame_path):
                break
            
            img = Image.open(frame_path)
            if img.mode != 'RGB':
                img = img.convert('RGB')
            
            # Normalize 8-bit pixels to [0, 1] floats as ComfyUI expects.
            frames.append(np.array(img).astype(np.float32) / 255.0)
            frame_index += 1
        
        if not frames:
            raise Exception("No frames extracted from output video")
        
        return torch.from_numpy(np.stack(frames))


# Node registration for ComfyUI: internal node id -> implementing class.
# ComfyUI discovers custom nodes by reading these module-level dicts.
NODE_CLASS_MAPPINGS = {
    "VideoCropNode": VideoCropNode
}

# Human-readable label shown in the ComfyUI node picker for each node id.
NODE_DISPLAY_NAME_MAPPINGS = {
    "VideoCropNode": "Video Crop (FFmpeg)"
}