#!/usr/bin/env python3
import os
import re
import sys
import argparse
import subprocess
import tempfile
import shutil
import numpy as np
import librosa
import soundfile as sf
from pydub import AudioSegment
import webrtcvad
import collections
import contextlib
import wave
import json
from pathlib import Path
from loguru import logger


class DouyinProcessor:
    """Download Douyin videos, isolate their vocals, and split the vocals into
    sentence-level audio segments.

    Pipeline: yt-dlp (download/extract MP3) -> Spleeter (vocal separation) ->
    WebRTC VAD (speech segmentation). Output files land in three
    subdirectories of `output_dir`: raw/, vocals/, segments/.
    """

    def __init__(self, output_dir="processed_audio"):
        """Create the output directory tree (raw/, vocals/, segments/)."""
        self.output_dir = output_dir
        self.raw_dir = os.path.join(output_dir, "raw")
        self.vocals_dir = os.path.join(output_dir, "vocals")
        self.segments_dir = os.path.join(output_dir, "segments")

        # exist_ok avoids the check-then-create race of the exists()/makedirs() pair
        for dir_path in (output_dir, self.raw_dir, self.vocals_dir, self.segments_dir):
            os.makedirs(dir_path, exist_ok=True)

    def download_video(self, url, output_name=None):
        """Download a Douyin video with yt-dlp and extract its audio as MP3.

        Args:
            url: Douyin video URL.
            output_name: Optional base name for the MP3; defaults to a
                timestamped name.

        Returns:
            Path of the downloaded MP3, or None if yt-dlp failed.
        """
        logger.info(f"Downloading video from {url}...")

        if output_name:
            output_file = os.path.join(self.raw_dir, f"{output_name}.mp3")
        else:
            # Use a timestamp if no name provided
            import time
            output_file = os.path.join(self.raw_dir, f"douyin_{int(time.time())}.mp3")

        try:
            cmd = [
                "yt-dlp",
                "--extract-audio",
                "--audio-format", "mp3",
                "--audio-quality", "0",  # best quality
                # yt-dlp substitutes the real extension, so hand it a template
                "-o", output_file.replace(".mp3", ".%(ext)s"),
                url,
            ]
            subprocess.run(cmd, check=True)
            logger.info(f"Successfully downloaded audio to {output_file}")
            return output_file
        except subprocess.CalledProcessError as e:
            # failures belong at error level, not info
            logger.error(f"Error downloading video: {e}")
            return None

    def extract_vocals(self, audio_file, output_name=None):
        """Separate vocals from `audio_file` with Spleeter (2-stems model).

        The vocals are resampled to 48 kHz, forced to stereo, and written as a
        24-bit WAV under vocals/.

        Returns:
            Path of the vocals WAV, or None on failure.
        """
        logger.info(f"Extracting vocals from {audio_file}...")

        # splitext keeps interior dots ("a.b.mp3" -> "a.b"), matching the
        # directory name Spleeter creates for its output; split('.')[0] did not.
        base_name = os.path.splitext(os.path.basename(audio_file))[0]
        if output_name:
            output_path = os.path.join(self.vocals_dir, f"{output_name}_vocals.wav")
        else:
            output_path = os.path.join(self.vocals_dir, f"{base_name}_vocals.wav")

        # Spleeter writes into a temp directory we discard afterwards
        with tempfile.TemporaryDirectory() as temp_dir:
            try:
                cmd = [
                    "spleeter", "separate",
                    "-p", "spleeter:2stems",
                    "-o", temp_dir,
                    audio_file,
                ]
                subprocess.run(cmd, check=True)

                # Spleeter nests output under <temp_dir>/<track name>/vocals.wav
                vocals_file = os.path.join(temp_dir, base_name, "vocals.wav")

                # Resample to 48 kHz; mono=False preserves the channel layout
                y, sr = librosa.load(vocals_file, sr=48000, mono=False)

                # Duplicate a mono channel so output is always stereo
                if y.ndim == 1:
                    y = np.array([y, y])

                # soundfile expects (frames, channels), hence the transpose
                sf.write(output_path, y.T, sr, subtype='PCM_24')
                logger.info(f"Vocals extracted and saved to {output_path}")
                return output_path

            except Exception as e:
                logger.error(f"Error extracting vocals: {e}")
                return None

    def segment_audio(self, audio_file, output_prefix=None, min_segment_length=1.0, max_segment_length=15.0):
        """Split `audio_file` into speech segments using WebRTC VAD.

        A 16 kHz mono 16-bit copy is fed to the VAD; the resulting time spans
        are re-cut from the original (48 kHz, stereo) audio and saved as
        24-bit WAVs under segments/.

        Args:
            audio_file: Source audio (typically the vocals WAV).
            output_prefix: Base name for segment files; defaults to the
                source file's stem.
            min_segment_length: Segments shorter than this (seconds) are dropped.
            max_segment_length: Longer segments are split into equal parts.

        Returns:
            List of written segment file paths.
        """
        logger.info(f"Segmenting audio from {audio_file}...")

        if output_prefix:
            prefix = output_prefix
        else:
            prefix = os.path.splitext(os.path.basename(audio_file))[0]

        # VAD requires 16 kHz mono PCM
        y, sr = librosa.load(audio_file, sr=16000, mono=True)

        vad = webrtcvad.Vad(3)  # aggressiveness 3 = most aggressive (range 0-3)

        # Write a 16-bit PCM copy for the VAD; delete=False because we reopen
        # it by path and unlink it ourselves in the finally block.
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
            temp_path = temp_file.name
            sf.write(temp_path, y, sr, subtype='PCM_16')

        try:
            segments = self._get_voice_segments(temp_path, vad)

            # Enforce the min/max duration constraints
            refined_segments = self._refine_segments(
                segments, min_length=min_segment_length, max_length=max_segment_length)

            segment_files = []
            for i, (start, end) in enumerate(refined_segments, 1):
                # Re-cut the span from the original high-quality audio
                high_quality_y, high_sr = librosa.load(
                    audio_file, sr=48000, mono=False, offset=start, duration=end - start)

                if high_quality_y.ndim == 1:
                    high_quality_y = np.array([high_quality_y, high_quality_y])

                output_file = os.path.join(self.segments_dir, f"{prefix}_segment_{i:03d}.wav")
                sf.write(output_file, high_quality_y.T, high_sr, subtype='PCM_24')
                segment_files.append(output_file)

            logger.info(f"Audio segmented into {len(segment_files)} segments")
            return segment_files

        finally:
            os.unlink(temp_path)

    def _get_voice_segments(self, audio_file, vad, frame_duration_ms=30, padding_ms=300):
        """Run WebRTC VAD over a WAV file and return speech spans in seconds.

        Assumes the file was written by segment_audio (16 kHz, mono, 16-bit
        PCM): webrtcvad only accepts 10/20/30 ms frames of such audio.

        Returns:
            List of (start_sec, end_sec) tuples, each padded by `padding_ms`
            on both sides and clamped to the audio bounds.
        """
        with contextlib.closing(wave.open(audio_file, 'rb')) as wf:
            num_channels = wf.getnchannels()
            sample_width = wf.getsampwidth()
            sample_rate = wf.getframerate()
            pcm_data = wf.readframes(wf.getnframes())

        frame_size = int(sample_rate * (frame_duration_ms / 1000.0))  # samples per frame

        # BUG FIX: frames must be sliced in bytes (samples * sample_width).
        # The old call treated the sample count as a byte count, producing
        # half-length (15 ms) frames that webrtcvad rejects.
        frames = list(self._frame_generator(frame_size, pcm_data, sample_width))

        # Classify each frame as speech / non-speech
        is_speech = [vad.is_speech(frame.bytes, sample_rate) for frame in frames]

        total_duration = len(pcm_data) / (sample_rate * sample_width * num_channels)
        frame_s = frame_duration_ms / 1000.0
        pad_s = padding_ms / 1000.0

        # Collapse consecutive speech frames into (start, end) segments
        segments = []
        in_voice = False
        segment_start = 0

        for i, speech in enumerate(is_speech):
            if speech and not in_voice:
                # speech onset: pad backwards, clamped to the start of audio
                in_voice = True
                segment_start = max(0, i * frame_s - pad_s)
            elif not speech and in_voice:
                # speech offset: pad forwards, clamped to the end of audio
                in_voice = False
                segment_end = min((i + 1) * frame_s + pad_s, total_duration)
                segments.append((segment_start, segment_end))

        # Audio ended while still inside a speech run
        if in_voice:
            segments.append((segment_start, total_duration))

        return segments

    def _frame_generator(self, frame_size, audio, sample_width):
        """Yield successive frames of `frame_size` samples from raw PCM bytes.

        `frame_size` is a sample count; each sample occupies `sample_width`
        bytes, so each yielded frame carries frame_size * sample_width bytes.
        Trailing bytes that do not fill a whole frame are dropped.
        """
        Frame = collections.namedtuple('Frame', 'bytes')
        frame_bytes = frame_size * sample_width  # convert samples -> bytes
        n = len(audio)
        offset = 0
        while offset + frame_bytes <= n:
            yield Frame(bytes=audio[offset:offset + frame_bytes])
            offset += frame_bytes

    def _refine_segments(self, segments, min_length=1.0, max_length=15.0):
        """Filter/split (start, end) segments to fit the length constraints.

        Segments shorter than `min_length` seconds are discarded; segments
        longer than `max_length` are split into equal-duration parts.
        """
        refined = []
        for start, end in segments:
            duration = end - start

            if duration < min_length:
                # too short to be a usable sentence
                continue

            if duration <= max_length:
                refined.append((start, end))
            else:
                # split into the fewest equal parts that each fit max_length
                n_parts = int(np.ceil(duration / max_length))
                part_duration = duration / n_parts

                for i in range(n_parts):
                    part_start = start + i * part_duration
                    part_end = min(start + (i + 1) * part_duration, end)
                    refined.append((part_start, part_end))

        return refined

    def process_video(self, url, output_name=None):
        """Run the full pipeline on one URL: download -> vocals -> segments.

        Returns:
            List of segment file paths, or None if any stage failed.
        """
        audio_file = self.download_video(url, output_name)
        if not audio_file:
            return None

        vocals_file = self.extract_vocals(audio_file, output_name)
        if not vocals_file:
            return None

        return self.segment_audio(vocals_file, output_name)


def main():
    """CLI entry point: download, vocal-isolate, and segment one Douyin video."""
    parser = argparse.ArgumentParser(description="Download and process Douyin videos")
    parser.add_argument("--url", help="Douyin video URL")
    parser.add_argument("--output_dir", default="processed_audio", help="Output directory")
    parser.add_argument("--output_name", help="Base name for output files")

    args = parser.parse_args()

    if not args.url:
        logger.error("Error: Please provide a Douyin video URL")
        # BUG FIX: was parser.logger.info_help(), which raised AttributeError
        parser.print_help()
        sys.exit(1)

    processor = DouyinProcessor(args.output_dir)
    segments = processor.process_video(args.url, args.output_name)

    if segments:
        logger.info(f"Successfully processed video. {len(segments)} segments saved to {processor.segments_dir}")
    else:
        # failures belong at error level, not info
        logger.error("Failed to process video")


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main() 