#!/usr/bin/env python3
"""
DaVinci Resolve Silent Parts Auto Cut Script
This script automatically detects and removes silent parts in the timeline.
"""

import sys
import time
from typing import Optional, Dict, List
import DaVinciResolveScript as dvr_script

def get_resolve() -> Optional[object]:
    """Connect to the running DaVinci Resolve instance.

    Returns:
        The Resolve scripting object, or None if the connection fails
        (e.g. Resolve is not running or external scripting is disabled).
    """
    try:
        return dvr_script.scriptapp("Resolve")
    except Exception as exc:
        print(f"Error connecting to DaVinci Resolve: {str(exc)}")
        return None

def get_project_manager(resolve: object) -> Optional[object]:
    """Fetch the project manager from a Resolve instance.

    Returns:
        The project manager object, or None if the call fails.
    """
    try:
        return resolve.GetProjectManager()
    except Exception as exc:
        print(f"Error getting project manager: {str(exc)}")
        return None

def get_current_project(project_manager: object) -> Optional[object]:
    """Fetch the currently open project from the project manager.

    Returns:
        The current project object, or None if the call fails.
    """
    try:
        return project_manager.GetCurrentProject()
    except Exception as exc:
        print(f"Error getting current project: {str(exc)}")
        return None

def get_current_timeline(project: object) -> Optional[object]:
    """Fetch the active timeline from a project.

    Returns:
        The current timeline object, or None if the call fails.
    """
    try:
        return project.GetCurrentTimeline()
    except Exception as exc:
        print(f"Error getting current timeline: {str(exc)}")
        return None

def detect_silence(timeline: object, threshold_db: float = -50.0, min_silence_duration: float = 1.0) -> List[Dict]:
    """
    Detect silent segments in the timeline.

    Scans every frame's peak audio level and collects runs of consecutive
    frames at or below the threshold that last at least the minimum duration.

    Args:
        timeline: Timeline object
        threshold_db: Silence threshold in dB (default: -50.0)
        min_silence_duration: Minimum silence duration in seconds (default: 1.0)

    Returns:
        List of dictionaries containing start and end frames of silent
        segments ('end' is exclusive: the first loud frame after the run).
        Returns an empty list if any timeline API call fails.
    """
    silent_segments: List[Dict] = []
    try:
        # Resolve timelines do not necessarily start at frame 0 (a
        # 01:00:00:00 start timecode maps to a large frame offset), so
        # ask the API instead of hard-coding 0.
        start_frame = int(timeline.GetStartFrame())
        end_frame = int(timeline.GetEndFrame())
        # GetSetting returns values as strings in the Resolve scripting
        # API; without this conversion the multiplication below raises
        # TypeError and the except clause silently yields no segments.
        frame_rate = float(timeline.GetSetting('frameRate'))

        # Minimum qualifying silence length, expressed in frames.
        min_silence_frames = int(min_silence_duration * frame_rate)

        # Start frame of the silent run currently being tracked, if any.
        current_silent_start = None

        for frame in range(start_frame, end_frame):
            audio_level = timeline.GetMaxLevelAtFrame(frame)

            if audio_level <= threshold_db:
                # Entering (or continuing) a silent run.
                if current_silent_start is None:
                    current_silent_start = frame
            else:
                # A loud frame closes any open silent run; keep it only
                # if it met the minimum duration.
                if current_silent_start is not None:
                    if frame - current_silent_start >= min_silence_frames:
                        silent_segments.append({
                            'start': current_silent_start,
                            'end': frame
                        })
                    current_silent_start = None

        # A silent run may extend to the end of the timeline.
        if current_silent_start is not None:
            if end_frame - current_silent_start >= min_silence_frames:
                silent_segments.append({
                    'start': current_silent_start,
                    'end': end_frame
                })

    except Exception as e:
        print(f"Error detecting silence: {str(e)}")

    return silent_segments

def cut_silent_segments(timeline: object, silent_segments: List[Dict]) -> bool:
    """
    Cut silent segments from the timeline.

    Args:
        timeline: Timeline object
        silent_segments: List of dictionaries containing start and end frames

    Returns:
        bool: True if successful, False otherwise
    """
    try:
        # Work on a sorted copy so the caller's list is not reordered as a
        # side effect. Deleting in reverse order keeps earlier segments'
        # frame numbers valid while later material is removed.
        ordered = sorted(silent_segments, key=lambda seg: seg['start'], reverse=True)

        for segment in ordered:
            # NOTE(review): SetCurrentTimecode takes a timecode string in
            # the Resolve scripting API; these are raw frame numbers from
            # detect_silence — confirm a frame->timecode conversion is not
            # required here.
            # Set in point
            timeline.SetCurrentTimecode(segment['start'])
            timeline.SetInPoint()

            # Set out point
            timeline.SetCurrentTimecode(segment['end'])
            timeline.SetOutPoint()

            # Delete segment
            timeline.DeleteInToOut()

        return True
    except Exception as e:
        print(f"Error cutting silent segments: {str(e)}")
        return False

def main():
    """Run the silence-removal workflow against the current timeline."""

    def require(obj):
        # Abort the script if any step of the Resolve object chain failed.
        if not obj:
            sys.exit(1)
        return obj

    # Walk the Resolve object hierarchy down to the active timeline,
    # exiting with status 1 as soon as any link is missing.
    resolve = require(get_resolve())
    project_manager = require(get_project_manager(resolve))
    project = require(get_current_project(project_manager))
    timeline = require(get_current_timeline(project))

    print("Analyzing audio for silence...")

    # Adjust threshold/duration as needed for the material.
    silent_segments = detect_silence(
        timeline,
        threshold_db=-50.0,
        min_silence_duration=1.0
    )

    if not silent_segments:
        print("No silent segments found.")
        sys.exit(0)

    print(f"Found {len(silent_segments)} silent segments.")

    if cut_silent_segments(timeline, silent_segments):
        print("Successfully removed silent segments.")
    else:
        print("Failed to remove silent segments.")

# Run the workflow only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
