import os
import torch
import numpy as np
import cv2
from PIL import Image
from typing import Dict, Any, List, Tuple, Optional
from .dctnet import DCTNetCore, ModelLoadError
from .utils import NodeProgressReporter

class DCTNetLoader:
    """Node for loading DCT-Net models.

    Exposes every style key registered in ``DCTNetCore.MODEL_MAPPING`` as a
    dropdown choice and returns the loaded model for downstream DCT-Net nodes.
    """

    @classmethod
    def INPUT_TYPES(cls) -> Dict[str, Any]:
        """Declare node inputs: a style choice and a weight cache directory."""
        return {
            "required": {
                "style": (list(DCTNetCore.MODEL_MAPPING.keys()),),
                "cache_dir": ("STRING", {"default": "models/dctnet"})
            }
        }

    RETURN_TYPES = ("DCTNET_MODEL",)
    FUNCTION = "load_model"
    CATEGORY = "DCT-Net"

    def load_model(self, style: str, cache_dir: str) -> tuple:
        """Load the DCT-Net model for *style*, caching weights under *cache_dir*.

        Returns:
            A 1-tuple containing the loaded model.

        Raises:
            Exception: if loading fails; the original ModelLoadError is
                preserved as the exception cause.
        """
        try:
            core = DCTNetCore(cache_dir=cache_dir)
            model = core.load_model(style)
            return (model,)
        except ModelLoadError as e:
            # Chain the cause so the original traceback is not lost.
            raise Exception(f"Failed to load DCT-Net model: {str(e)}") from e

class DCTNetProcess:
    """Node for processing a single image input with DCT-Net."""

    @classmethod
    def INPUT_TYPES(cls) -> Dict[str, Any]:
        """Declare node inputs: an image and a loaded DCT-Net model."""
        return {
            "required": {
                "image": ("IMAGE",),
                "model": ("DCTNET_MODEL",),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "process_image"
    CATEGORY = "DCT-Net"

    def process_image(self, image: np.ndarray, model: Any) -> tuple:
        """Stylize *image* with *model* and return the result as a 1-tuple.

        Raises:
            Exception: wraps any underlying failure; the original exception
                is preserved as the cause.
        """
        try:
            core = DCTNetCore()
            result = core.process_image(image, model)
            return (result,)
        except Exception as e:
            # Chain the cause so the original traceback is not lost.
            raise Exception(f"Failed to process image: {str(e)}") from e

class DCTNetBatchProcess:
    """Node for batch processing images with DCT-Net."""

    @classmethod
    def INPUT_TYPES(cls) -> Dict[str, Any]:
        """Declare node inputs: an image batch and a loaded DCT-Net model."""
        return {
            "required": {
                "images": ("IMAGE",),
                "model": ("DCTNET_MODEL",),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "process_batch"
    CATEGORY = "DCT-Net"

    def process_batch(self, images: np.ndarray, model: Any) -> tuple:
        """Stylize every image in the batch and restack into one array.

        Args:
            images: batch of images; iterated along the first axis.
            model: loaded DCT-Net model from DCTNetLoader.

        Raises:
            Exception: wraps any underlying failure; the original exception
                is preserved as the cause.
        """
        try:
            core = DCTNetCore()
            # Iterate the batch directly rather than indexing via range(len(...)).
            results = [core.process_image(image, model) for image in images]
            # Stack the per-image results back into a single batch array.
            batch_result = np.stack(results)
            return (batch_result,)
        except Exception as e:
            # Chain the cause so the original traceback is not lost.
            raise Exception(f"Failed to process batch: {str(e)}") from e

class VideoFrameExtractor:
    """Extract frames from a video file as a normalized RGB image batch."""

    @classmethod
    def INPUT_TYPES(cls) -> Dict[str, Any]:
        """Declare inputs: path, sampling interval, frame cap and time window."""
        return {
            "required": {
                "video_path": ("STRING", {"default": ""}),
                "frame_interval": ("INT", {"default": 1, "min": 1, "max": 30}),
                "max_frames": ("INT", {"default": 0, "min": 0}),
                "start_time": ("FLOAT", {"default": 0.0, "min": 0.0}),
                "end_time": ("FLOAT", {"default": -1.0}),
            }
        }

    RETURN_TYPES = ("IMAGE", "INT")
    RETURN_NAMES = ("frames", "frame_count")
    FUNCTION = "extract_frames"
    CATEGORY = "DCT-Net/Video"

    def extract_frames(self, video_path: str, frame_interval: int = 1,
                      max_frames: int = 0, start_time: float = 0.0,
                      end_time: float = -1.0) -> Tuple[np.ndarray, int]:
        """Extract frames from the video.

        Args:
            video_path: path to the source video file.
            frame_interval: keep every N-th frame within the window.
            max_frames: stop after this many kept frames (0 = no limit).
            start_time: window start in seconds.
            end_time: window end in seconds (<= 0 means end of video).

        Returns:
            Tuple of (frames shaped (N, H, W, 3), float32 in [0, 1], RGB;
            number of extracted frames).

        Raises:
            FileNotFoundError: if *video_path* does not exist.
            RuntimeError: if the video cannot be opened or yields no frames.
        """
        if not os.path.exists(video_path):
            raise FileNotFoundError(f"视频文件不存在: {video_path}")

        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise RuntimeError(f"无法打开视频文件: {video_path}")

        try:
            # Probe video metadata.
            fps = cap.get(cv2.CAP_PROP_FPS)
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            if fps <= 0:
                # Some containers report no FPS; fall back to a common default
                # so the time-window arithmetic below stays well-defined.
                fps = 30.0

            # Convert the time window to frame indices, clamped to the video.
            start_frame = int(start_time * fps)
            end_frame = min(int(end_time * fps), total_frames) if end_time > 0 else total_frames

            # Progress reporter; at least one step so reporting stays sane
            # for degenerate windows.
            total_steps = max((end_frame - start_frame) // frame_interval, 1)
            progress = NodeProgressReporter(total_steps, "提取视频帧", self)

            # Seek to the start of the window.
            cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

            frames = []
            frame_count = 0
            current_frame = start_frame

            while current_frame < end_frame:
                ret, frame = cap.read()
                if not ret:
                    break

                if frame_count % frame_interval == 0:
                    # OpenCV decodes BGR; convert to RGB.
                    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    # Normalize to float32 in [0, 1].
                    frames.append(frame_rgb.astype(np.float32) / 255.0)
                    progress.update()

                frame_count += 1
                current_frame += 1

                if max_frames > 0 and len(frames) >= max_frames:
                    break

            if not frames:
                raise RuntimeError("没有提取到任何帧")

            # Mark progress as finished.
            progress.done()

            # Stack the kept frames into one (N, H, W, 3) batch.
            frames_array = np.stack(frames)
            return (frames_array, len(frames))

        finally:
            cap.release()

class VideoFrameRebuilder:
    """Rebuild processed frames into an MP4 video file."""

    @classmethod
    def INPUT_TYPES(cls) -> Dict[str, Any]:
        """Declare inputs: frame batch, output path, FPS and quality."""
        return {
            "required": {
                "frames": ("IMAGE",),
                "output_path": ("STRING", {"default": "output.mp4"}),
                "fps": ("FLOAT", {"default": 30.0, "min": 1.0, "max": 120.0}),
                "quality": ("INT", {"default": 95, "min": 0, "max": 100}),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("output_path",)
    FUNCTION = "rebuild_video"
    CATEGORY = "DCT-Net/Video"

    def rebuild_video(self, frames: np.ndarray, output_path: str,
                     fps: float = 30.0, quality: int = 95) -> Tuple[str]:
        """Write the float frame batch to *output_path* as an mp4v video.

        Args:
            frames: frames shaped (N, H, W, 3), float values in [0, 1], RGB.
            output_path: destination video file path.
            fps: output frame rate.
            quality: kept for interface compatibility; the mp4v VideoWriter
                backend used here does not take a quality setting, so this
                value is currently unused.

        Returns:
            A 1-tuple containing the output path.

        Raises:
            ValueError: if *frames* is not a 4-D numpy array.
            RuntimeError: if the video writer cannot be opened.
        """
        if not isinstance(frames, np.ndarray) or len(frames.shape) != 4:
            raise ValueError("frames必须是形状为(N,H,W,3)的numpy数组")

        # Make sure the destination directory exists.
        os.makedirs(os.path.dirname(os.path.abspath(output_path)), exist_ok=True)

        # Progress reporter, one step per frame.
        progress = NodeProgressReporter(len(frames), "重建视频", self)

        # Prepare the video writer.
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        height, width = frames.shape[1:3]
        writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
        if not writer.isOpened():
            # cv2.VideoWriter fails silently otherwise, leaving an empty file.
            raise RuntimeError(f"Failed to open video writer for: {output_path}")

        try:
            for frame in frames:
                # Clip before the uint8 cast: values slightly outside [0, 1]
                # would otherwise wrap around and corrupt the frame.
                frame_uint8 = (np.clip(frame, 0.0, 1.0) * 255).astype(np.uint8)
                # Convert RGB back to OpenCV's BGR ordering.
                frame_bgr = cv2.cvtColor(frame_uint8, cv2.COLOR_RGB2BGR)
                writer.write(frame_bgr)
                progress.update()

            # Mark progress as finished.
            progress.done()
            return (output_path,)

        finally:
            writer.release()

class VideoProgressiveProcessor:
    """Stream a video through DCT-Net in small batches and write the result."""

    @classmethod
    def INPUT_TYPES(cls) -> Dict[str, Any]:
        """Declare inputs: source video, model, output path and batching knobs."""
        return {
            "required": {
                "video_path": ("STRING", {"default": ""}),
                "model": ("DCTNET_MODEL",),
                "output_path": ("STRING", {"default": "output.mp4"}),
                "batch_size": ("INT", {"default": 4, "min": 1, "max": 16}),
                "frame_interval": ("INT", {"default": 1, "min": 1, "max": 30}),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("output_path",)
    FUNCTION = "process_video"
    CATEGORY = "DCT-Net/Video"

    def _process_and_write(self, core: Any, model: Any,
                           frames: List[np.ndarray], writer: Any,
                           progress: Any) -> None:
        """Run one buffered batch through the model and write the results."""
        for frame in frames:
            processed = core.process_image(frame, model)
            progress.update()
            # Clip before the uint8 cast so out-of-range model outputs
            # cannot wrap around; then convert RGB back to BGR for OpenCV.
            frame_uint8 = (np.clip(processed, 0.0, 1.0) * 255).astype(np.uint8)
            frame_bgr = cv2.cvtColor(frame_uint8, cv2.COLOR_RGB2BGR)
            writer.write(frame_bgr)

    def process_video(self, video_path: str, model: Any, output_path: str,
                     batch_size: int = 4, frame_interval: int = 1) -> Tuple[str]:
        """Progressively process video frames, batch by batch.

        Args:
            video_path: path to the source video file.
            model: loaded DCT-Net model from DCTNetLoader.
            output_path: destination video file path.
            batch_size: number of frames buffered before processing.
            frame_interval: keep every N-th frame of the source.

        Returns:
            A 1-tuple containing the output path.

        Raises:
            FileNotFoundError: if *video_path* does not exist.
            RuntimeError: if the video cannot be opened.
        """
        if not os.path.exists(video_path):
            raise FileNotFoundError(f"视频文件不存在: {video_path}")

        # DCT-Net core instance used for all frames.
        core = DCTNetCore()

        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise RuntimeError(f"无法打开视频文件: {video_path}")

        # The writer is created inside the try block; keep a sentinel so the
        # finally clause cannot hit a NameError when setup fails early.
        writer = None
        try:
            # Probe video metadata.
            fps = cap.get(cv2.CAP_PROP_FPS)
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            # Progress reporter, one step per kept frame.
            progress = NodeProgressReporter(total_frames // frame_interval, "处理视频", self)

            # Prepare the video writer; output FPS is scaled down by the
            # sampling interval so playback speed matches the source.
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            writer = cv2.VideoWriter(output_path, fourcc, fps / frame_interval, (width, height))

            frames_buffer = []
            frame_count = 0

            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                if frame_count % frame_interval == 0:
                    # Convert BGR to RGB and normalize to float32 in [0, 1].
                    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    frames_buffer.append(frame_rgb.astype(np.float32) / 255.0)

                    # Process once the buffer reaches the batch size.
                    if len(frames_buffer) >= batch_size:
                        self._process_and_write(core, model, frames_buffer, writer, progress)
                        frames_buffer = []

                frame_count += 1

            # Flush any partially filled final batch.
            if frames_buffer:
                self._process_and_write(core, model, frames_buffer, writer, progress)

            # Mark progress as finished.
            progress.done()
            return (output_path,)

        finally:
            cap.release()
            if writer is not None:
                writer.release()

# Registration table: node identifier -> implementing class.
NODE_CLASS_MAPPINGS = dict(
    DCTNetLoader=DCTNetLoader,
    DCTNetProcess=DCTNetProcess,
    DCTNetBatchProcess=DCTNetBatchProcess,
    VideoFrameExtractor=VideoFrameExtractor,
    VideoFrameRebuilder=VideoFrameRebuilder,
    VideoProgressiveProcessor=VideoProgressiveProcessor,
)

# Registration table: node identifier -> human-readable display name.
NODE_DISPLAY_NAME_MAPPINGS = dict(
    DCTNetLoader="Load DCT-Net Model",
    DCTNetProcess="DCT-Net Process Image",
    DCTNetBatchProcess="DCT-Net Batch Process",
    VideoFrameExtractor="Extract Video Frames",
    VideoFrameRebuilder="Rebuild Video",
    VideoProgressiveProcessor="Process Video (Progressive)",
)