#!/usr/bin/env python3
"""
Script to process text.jsonl file with DepthCrafter inference and evaluation
"""

import json
import os
import argparse
import glob
import numpy as np
import torch
import cv2
from pathlib import Path
from tqdm import tqdm
import gc

from os.path import join
from depthcrafter.depth_crafter_ppl import DepthCrafterPipeline
from depthcrafter.unet import DiffusersUNetSpatioTemporalConditionModelDepthCrafter
from depthcrafter.utils import read_video_frames, save_video, vis_sequence_depth


class DepthCrafterProcessor:
    """Wraps the DepthCrafter diffusion pipeline.

    Builds temporary videos from image sequences, runs depth inference,
    and saves the depth maps (npz) plus mp4 visualizations.
    """

    def __init__(
        self,
        unet_path: str = "tencent/DepthCrafter",
        pre_train_path: str = "stabilityai/stable-video-diffusion-img2vid-xt",
        cpu_offload: str = "model",
    ):
        """Load the DepthCrafter UNet and pipeline in fp16.

        Args:
            unet_path: HuggingFace id or local path of the DepthCrafter UNet.
            pre_train_path: Base Stable Video Diffusion pipeline weights.
            cpu_offload: "model", "sequential", or None (keep pipeline on GPU).

        Raises:
            ValueError: If cpu_offload is an unrecognized string.
        """
        print("Loading DepthCrafter model...")
        unet = DiffusersUNetSpatioTemporalConditionModelDepthCrafter.from_pretrained(
            unet_path,
            low_cpu_mem_usage=True,
            torch_dtype=torch.float16,
        )

        self.pipe = DepthCrafterPipeline.from_pretrained(
            pre_train_path,
            unet=unet,
            torch_dtype=torch.float16,
            variant="fp16",
            track_time=True,
        )

        # Memory optimization: offload weights to CPU between forward passes.
        if cpu_offload is not None:
            if cpu_offload == "sequential":
                self.pipe.enable_sequential_cpu_offload()
            elif cpu_offload == "model":
                self.pipe.enable_model_cpu_offload()
            else:
                raise ValueError(f"Unknown cpu offload option: {cpu_offload}")
        else:
            self.pipe.to("cuda")

        # Attention optimizations; xformers is optional, so failure is non-fatal.
        try:
            self.pipe.enable_xformers_memory_efficient_attention()
        except Exception as e:
            print(f"Xformers not enabled: {e}")
        self.pipe.enable_attention_slicing()
        print("DepthCrafter model loaded successfully!")

    @staticmethod
    def _normalize_depth(depth: np.ndarray) -> np.ndarray:
        """Min-max normalize depth to [0, 1].

        Returns an all-zero array when the input is constant, instead of
        dividing by zero (the original `(d - min) / (max - min)` produced
        NaN/inf for flat depth).
        """
        d_min = depth.min()
        d_range = depth.max() - d_min
        if d_range == 0:
            return np.zeros_like(depth, dtype=np.float64)
        return (depth - d_min) / d_range

    def create_video_from_images(self, image_dir, rgb_suffix, output_video_path, fps=15):
        """Assemble all images matching ``*<rgb_suffix>`` in image_dir into an mp4.

        Args:
            image_dir: Directory containing the image sequence.
            rgb_suffix: Filename suffix to glob for (a "*" is prepended here).
            output_video_path: Destination mp4 path.
            fps: Frame rate of the written video.

        Returns:
            output_video_path.

        Raises:
            ValueError: If no images match or the first image is unreadable.
        """
        image_pattern = os.path.join(image_dir, f"*{rgb_suffix}")
        image_files = sorted(glob.glob(image_pattern))

        if not image_files:
            raise ValueError(f"No images found with pattern: {image_pattern}")

        print(f"Found {len(image_files)} images in {image_dir}")

        # First frame fixes the video dimensions.
        first_img = cv2.imread(image_files[0])
        if first_img is None:
            raise ValueError(f"Could not read image: {image_files[0]}")

        height, width = first_img.shape[:2]

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))
        try:
            for img_path in image_files:
                img = cv2.imread(img_path)
                # Unreadable frames are skipped silently (best-effort assembly).
                if img is not None:
                    out.write(img)
        finally:
            # Always finalize the container, even if a write raised.
            out.release()
        return output_video_path

    def infer_depth(
        self,
        video_path: str,
        output_dir: str,
        seq_name: str,
        num_denoising_steps: int = 5,
        guidance_scale: float = 1.0,
        window_size: int = 110,
        process_length: int = 195,
        overlap: int = 25,
        max_res: int = 1024,
        target_fps: int = 15,
        seed: int = 42,
    ):
        """Run DepthCrafter inference on a video and save depth + visualizations.

        Writes "<seq_name>_depth.npz", "_depth.mp4", "_vis.mp4", and
        "_input.mp4" under output_dir/seq_name/.

        Args:
            video_path: Input video to estimate depth for.
            output_dir: Root output directory.
            seq_name: Sequence name; used for the output subdirectory and files.
            num_denoising_steps: Diffusion denoising steps.
            guidance_scale: Classifier-free guidance scale.
            window_size: Sliding-window length for long videos.
            process_length: Maximum number of frames to process.
            overlap: Overlap between consecutive windows.
            max_res: Maximum spatial resolution for frame reading.
            target_fps: Desired fps (read_video_frames may adjust it).
            seed: Unused; kept for interface compatibility.

        Returns:
            Path to the saved "<seq_name>_depth.npz".
        """
        print(f"Processing {seq_name}...")

        os.makedirs(output_dir, exist_ok=True)
        save_dir = os.path.join(output_dir, seq_name)
        os.makedirs(save_dir, exist_ok=True)
        save_path = os.path.join(save_dir, seq_name)

        # Read/resample frames; the helper returns the fps it actually used.
        frames, target_fps = read_video_frames(
            video_path,
            process_length,
            target_fps,
            max_res,
            dataset="open",
        )

        with torch.inference_mode():
            res = self.pipe(
                frames,
                height=frames.shape[1],
                width=frames.shape[2],
                output_type="np",
                guidance_scale=guidance_scale,
                num_inference_steps=num_denoising_steps,
                window_size=window_size,
                overlap=overlap,
                track_time=True,
            ).frames[0]

        # Collapse the channel dim to a single-channel map, then normalize.
        res = res.mean(axis=-1)
        res = self._normalize_depth(res)

        np.savez_compressed(save_path + "_depth.npz", depth=res)

        # Color-mapped visualization; drop alpha channel if the colormap is RGBA.
        vis = vis_sequence_depth(res)
        if vis.shape[-1] == 4:
            vis = vis[..., :3]

        save_video(res, save_path + "_depth.mp4", fps=target_fps)
        save_video(vis, save_path + "_vis.mp4", fps=target_fps)
        save_video(frames, save_path + "_input.mp4", fps=target_fps)

        # Free host/GPU memory between sequences.
        gc.collect()
        torch.cuda.empty_cache()

        return save_path + "_depth.npz"

    def process_jsonl_entry(self, entry, output_dir, temp_dir="./temp_videos", jsonl_root=None, process_length=195):
        """Process one manifest entry: build a temp video, run inference, clean up.

        Args:
            entry: Dict with at least "seq_name" and "rgb" (image dir),
                optionally "rgb-suffix".
            output_dir: Root output directory for depth results.
            temp_dir: Directory for the intermediate mp4 (removed afterwards).
            jsonl_root: Root the entry's "rgb" path is relative to; if None,
                "rgb" is used as-is (the original raised TypeError here).
            process_length: Maximum number of frames to process.

        Returns:
            A status dict with "status" == "success" (plus paths) or "error".
        """
        seq_name = entry["seq_name"]
        rgb_dir = entry["rgb"]
        # Default ".png": create_video_from_images prepends the "*" itself.
        # (The old "*.png" default produced the redundant glob "**.png".)
        rgb_suffix = entry.get("rgb-suffix", ".png")

        os.makedirs(temp_dir, exist_ok=True)
        # "#" in sequence names is not filesystem-friendly for the temp file.
        temp_video_path = os.path.join(temp_dir, f"{seq_name.replace('#', '_')}.mp4")

        try:
            image_dir = rgb_dir if jsonl_root is None else join(jsonl_root, rgb_dir)
            self.create_video_from_images(image_dir, rgb_suffix, temp_video_path)

            depth_npz_path = self.infer_depth(
                temp_video_path,
                output_dir,
                seq_name,
                process_length=process_length,
            )

            return {
                "seq_name": seq_name,
                "status": "success",
                "depth_path": depth_npz_path,
                "input_video": temp_video_path,
            }
        except Exception as e:
            # Best-effort batch processing: record the failure and move on.
            print(f"Error processing {seq_name}: {str(e)}")
            return {
                "seq_name": seq_name,
                "status": "error",
                "error": str(e),
            }
        finally:
            # Remove the intermediate video whether inference succeeded or not.
            if os.path.exists(temp_video_path):
                os.remove(temp_video_path)

def main():
    parser = argparse.ArgumentParser(description="Process JSONL file with DepthCrafter")
    parser.add_argument("--jsonl_path", type=str, required=True, help="Path to text.jsonl file")
    parser.add_argument("--output_dir", type=str, default="./depthcrafter_output", help="Output directory")
    parser.add_argument("--temp_dir", type=str, default="./temp_videos", help="Temporary directory for videos")
    parser.add_argument("--unet_path", type=str, default="tencent/DepthCrafter", help="UNet model path")
    parser.add_argument("--pre_train_path", type=str, default="stabilityai/stable-video-diffusion-img2vid-xt", help="Pre-trained model path")
    parser.add_argument("--cpu_offload", type=str, default="model", choices=["model", "sequential", None], help="CPU offload strategy")
    parser.add_argument("--num_denoising_steps", type=int, default=5, help="Number of denoising steps")
    parser.add_argument("--guidance_scale", type=float, default=1.0, help="Guidance scale")
    parser.add_argument("--max_res", type=int, default=1024, help="Maximum resolution")
    parser.add_argument("--process_length", type=int, default=-1, help="Process length")
    
    args = parser.parse_args()
    
    # Initialize processor
    processor = DepthCrafterProcessor(
        unet_path=args.unet_path,
        pre_train_path=args.pre_train_path,
        cpu_offload=args.cpu_offload
    )
    
    # Read JSONL file
    entries = []
    jsonl_root= os.path.dirname(args.jsonl_path)
    with open(args.jsonl_path, 'r') as f:
        for line in f:
            entries.append(json.loads(line.strip()))
    
    print(f"Found {len(entries)} entries to process")

    if args.process_length == -1:
        args.process_length = 1e5
    
    # Process each entry
    results = []
    for entry in tqdm(entries, desc="Processing entries"):
        result = processor.process_jsonl_entry(entry, args.output_dir, args.temp_dir,jsonl_root,args.process_length)
        results.append(result)
        
        # Save intermediate results
        os.makedirs(args.output_dir, exist_ok=True)
        with open(os.path.join(args.output_dir, "processing_results.json"), 'w') as f:
            json.dump(results, f, indent=2)

    
    # Print summary
    successful = sum(1 for r in results if r["status"] == "success")
    failed = len(results) - successful
    
    print(f"\nProcessing complete!")
    print(f"Successful: {successful}")
    print(f"Failed: {failed}")
    print(f"Results saved to: {args.output_dir}")


if __name__ == "__main__":
    main()
