from env import World
from agent import Agent
from viz import CognitiveVisualizer
import matplotlib.pyplot as plt
import numpy as np
import librosa
import soundfile as sf
import json
import os
import time
from pydub import AudioSegment
import io

def main():
    """Demo 3: Beauty perception and capture from audio signals

    Pipeline per audio file: load/preprocess -> perceive (feature extraction)
    -> plan -> evaluate beauty -> conditionally capture + visualize. The
    collected analyses are then saved to JSON and summarized on stdout.
    Depends on the project-local Agent and CognitiveVisualizer classes.
    """
    print("\n=== Initializing Audio Beauty Perception System ===")
    
    # Initialize agent and visualizer
    agent = Agent()
    visualizer = CognitiveVisualizer()
    
    # Load and process test audio files
    test_audio = [
        "data/audio.1.wav",   # e.g., classical music
        #"data/audio.2.m4a",   # e.g., nature sounds
        #"data/audio.3.m4a"    # e.g., nature sounds
    ]
    
    # Accumulates one record per audio file that cleared the beauty threshold.
    beauty_analysis = []
    
    for audio_path in test_audio:
        print(f"\n{'='*50}")
        print(f"🎵 Processing audio: {audio_path}")
        print(f"{'='*50}")
        
        # 1. Perception Phase: decode the file and extract audio features.
        print("\n1️⃣ Perception Phase:")
        audio = load_and_preprocess_audio(audio_path)
        if audio is None:
            print(f"❌ Failed to load audio: {audio_path}")
            continue
            
        perception = agent.perception_space.analyze_audio_signal(audio)
        if perception is None:
            print("❌ No audio features detected.")
            continue
        print("Audio Features Detected:")
        print_dict(perception, indent=2)
        
        # 2. Planning Phase: build an analysis plan covering the four aspects
        # that the rest of the pipeline (scores, metrics) is keyed on.
        print("\n2️⃣ Planning Phase:")
        analysis_plan = agent.planning_space.create_analysis_plan(
            perception,
            aspects=[
                "rhythm_harmony",
                "tonal_balance",
                "emotional_resonance",
                "sonic_texture"
            ]
        )
        print("Analysis Plan Created:")
        print_dict(analysis_plan, indent=2)
        
        # 3. Reasoning Phase: score beauty from perception + plan.
        print("\n3️⃣ Reasoning Phase:")
        beauty_score = agent.reasoning_space.evaluate_beauty(
            perception=perception,
            plan=analysis_plan
        )
        print("Beauty Evaluation Results:")
        print_dict(beauty_score, indent=2)
        
        # 4. Action Phase: capture elements only above a hard-coded threshold.
        # Files below 0.7 are excluded from beauty_analysis entirely, so they
        # do not appear in the saved JSON or the final summary.
        print("\n4️⃣ Action Phase:")
        if beauty_score["overall_score"] > 0.7:
            print("✨ High Beauty Score Detected - Capturing Elements")
            beauty_elements = agent.capture_beauty_elements(
                audio=audio,
                perception=perception,
                beauty_score=beauty_score
            )
            
            # Record analysis
            # NOTE(review): several helpers called from save_analysis_results
            # (_calculate_perception_success_rate, _summarize_detected_features,
            # _summarize_audio_metrics) look up a 'perception' key on these
            # records, which is never stored here — those metrics therefore
            # always come out empty/zero. Confirm whether `perception` should
            # be added (it may not be JSON-serializable as-is).
            analysis = {
                "audio": audio_path,
                "beauty_score": beauty_score,
                "key_elements": beauty_elements
            }
            beauty_analysis.append(analysis)
            
            # Visualize audio beauty elements
            visualizer.highlight_audio_elements(
                audio=audio,
                elements=beauty_elements,
                scores=beauty_score
            )
            
            print("\nKey Beauty Elements Captured:")
            print_dict(beauty_elements, indent=2)
        else:
            print("⚠️ Beauty score below threshold - Skipping capture")
    
    # Save results
    save_analysis_results(beauty_analysis)
    
    # Display final summary
    print(f"\n{'='*50}")
    print("📊 Final Audio Beauty Analysis Summary")
    print(f"{'='*50}")
    
    for i, analysis in enumerate(beauty_analysis, 1):
        print(f"\n🎵 Audio {i}: {analysis['audio']}")
        print(f"Overall Beauty Score: {analysis['beauty_score']['overall_score']:.2f}")
        print("Aspect Scores:")
        for aspect, score in analysis['beauty_score']['aspect_scores'].items():
            print(f"  • {aspect}: {score:.2f}")
        print("Emotional Impact:")
        print_dict(analysis['beauty_score']['emotional_impact'], indent=2)
    
    # Keep visualization open (blocks until all figure windows are closed)
    plt.show()

def print_dict(d, indent=0):
    """Recursively pretty-print a (possibly nested) dictionary.

    Dict and list values are introduced with a 📌 header and their contents
    printed two spaces deeper; scalar values are printed as • bullets, with
    floats formatted to two decimal places.
    """
    pad = " " * indent
    for key, value in d.items():
        if isinstance(value, dict):
            print(f"{pad}📌 {key}:")
            print_dict(value, indent + 2)
        elif isinstance(value, list):
            print(f"{pad}📌 {key}:")
            for entry in value:
                if isinstance(entry, dict):
                    print_dict(entry, indent + 2)
                else:
                    print(f"{pad}  • {entry}")
        elif isinstance(value, float):
            print(f"{pad}• {key}: {value:.2f}")
        else:
            print(f"{pad}• {key}: {value}")

def load_and_preprocess_audio(path):
    """Load and preprocess audio for analysis.

    Decodes the file with pydub (supports wav/m4a/etc.), converts the raw
    integer samples to a normalized float32 mono waveform, and returns a dict
    with 'waveform' (np.ndarray), 'sr' (int sample rate), and 'duration'
    (seconds). Returns None on any decoding failure — callers check for None
    and skip the file.
    """
    try:
        # Load audio file using pydub (supports m4a)
        audio = AudioSegment.from_file(path)
        
        # Convert to numpy array of interleaved integer samples
        samples = np.array(audio.get_array_of_samples())
        
        # Convert to float32 and normalize to [-1, 1]. The RHS is evaluated
        # before `samples` is rebound, so iinfo sees the original int dtype.
        samples = samples.astype(np.float32) / np.iinfo(samples.dtype).max
        
        # Downmix any multi-channel audio (stereo, 5.1, ...) to mono by
        # averaging across channels. The previous `channels == 2` check left
        # layouts with more than two channels interleaved, corrupting the
        # waveform; this handles any channel count and is identical for stereo.
        if audio.channels > 1:
            samples = samples.reshape((-1, audio.channels)).mean(axis=1)
            
        print(f"✅ Successfully loaded audio: {path}")
        print(f"   Duration: {audio.duration_seconds:.2f}s, Sample Rate: {audio.frame_rate}Hz")
        
        return {
            'waveform': samples,
            'sr': audio.frame_rate,
            'duration': audio.duration_seconds
        }
    except Exception as e:
        print(f"❌ Error loading audio {path}: {str(e)}")
        return None

def generate_recommendations(beauty_elements):
    """Generate improvement recommendations from analyzed beauty elements.

    Every element whose 'score' falls below 0.6 yields one
    "Consider improving <type>" entry, in input order.
    """
    return [
        f"Consider improving {element['type']}"
        for element in beauty_elements
        if element['score'] < 0.6
    ]

def save_analysis_results(beauty_analysis, output_path="results/ai.agent.demo.3.preliminary.results.json"):
    """Serialize the full audio beauty-analysis report to a JSON file.

    Builds a report from run metadata, per-cognitive-space metrics computed
    by the module-level helper functions, aggregate emotional metrics, and
    the raw per-file analyses, then writes it to `output_path`, creating the
    output directory if needed.
    """
    os.makedirs(os.path.dirname(output_path), exist_ok=True)

    # Run-level metadata
    metadata = {
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
        "total_audio_files": len(beauty_analysis),
        "version": "0.1.0",
    }

    # Metrics per cognitive space, each computed by a dedicated helper
    perception_space = {
        "feature_extraction_success_rate": _calculate_perception_success_rate(beauty_analysis),
        "detected_features": _summarize_detected_features(beauty_analysis),
        "audio_quality_metrics": _summarize_audio_metrics(beauty_analysis),
    }
    planning_space = {
        "plan_completion_rate": _calculate_plan_completion_rate(beauty_analysis),
        "average_steps_per_plan": _calculate_avg_steps(beauty_analysis),
        "aspect_coverage": _summarize_aspect_coverage(beauty_analysis),
    }
    reasoning_space = {
        "average_confidence": _calculate_avg_confidence(beauty_analysis),
        "aspect_correlations": _calculate_aspect_correlations(beauty_analysis),
        "emotional_resonance": _summarize_emotional_metrics(beauty_analysis),
    }
    action_space = {
        "capture_success_rate": _calculate_capture_rate(beauty_analysis),
        "action_effectiveness": _calculate_action_effectiveness(beauty_analysis),
        "key_moments": _summarize_key_moments(beauty_analysis),
    }

    # Aggregate emotional metrics across all analyzed files
    emotional_metrics = {
        "average_beauty_score": _calculate_avg_beauty_score(beauty_analysis),
        "emotional_impact": _summarize_emotional_impact(beauty_analysis),
        "resonance_patterns": _analyze_resonance_patterns(beauty_analysis),
    }

    results = {
        "metadata": metadata,
        "cognitive_spaces_metrics": {
            "perception_space": perception_space,
            "planning_space": planning_space,
            "reasoning_space": reasoning_space,
            "action_space": action_space,
        },
        "emotional_metrics": emotional_metrics,
        "detailed_analysis": beauty_analysis,
    }

    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False)
    print(f"\n💾 Results saved to: {output_path}")

def _calculate_perception_success_rate(analysis):
    """Calculate success rate of feature extraction"""
    total = len(analysis)
    successful = sum(1 for item in analysis if item.get('perception', {}).get('deep_features') is not None)
    return successful / total if total > 0 else 0

def _summarize_detected_features(analysis):
    """Summarize types of features detected across all audio files"""
    feature_types = set()
    for item in analysis:
        if 'perception' in item:
            feature_types.update(item['perception'].keys())
    return list(feature_types)

def _calculate_plan_completion_rate(analysis):
    """Calculate rate of successful plan execution"""
    total_steps = 0
    completed_steps = 0
    for item in analysis:
        plan = item.get('beauty_score', {}).get('aspect_scores', {})
        total_steps += len(plan)
        completed_steps += sum(1 for score in plan.values() if score > 0)
    return completed_steps / total_steps if total_steps > 0 else 0

def _calculate_avg_confidence(analysis):
    """Calculate average confidence across all evaluations"""
    confidences = [item.get('beauty_score', {}).get('confidence', 0) for item in analysis]
    return sum(confidences) / len(confidences) if confidences else 0

def _calculate_capture_rate(analysis):
    """Calculate success rate of beauty element capture"""
    total = len(analysis)
    successful = sum(1 for item in analysis if item.get('key_elements'))
    return successful / total if total > 0 else 0

def _calculate_avg_beauty_score(analysis):
    """Calculate average beauty score across all audio files"""
    scores = [item.get('beauty_score', {}).get('overall_score', 0) for item in analysis]
    return sum(scores) / len(scores) if scores else 0

def _summarize_emotional_metrics(analysis):
    """Summarize emotional impact metrics"""
    impacts = []
    for item in analysis:
        if 'beauty_score' in item and 'emotional_impact' in item['beauty_score']:
            impacts.append(item['beauty_score']['emotional_impact'])
    
    if not impacts:
        return {}
        
    avg_impact = {}
    for key in impacts[0].keys():
        values = [impact[key] for impact in impacts]
        avg_impact[key] = sum(values) / len(values)
    
    return avg_impact

def _summarize_audio_metrics(analysis):
    """Summarize audio quality metrics across all audio files"""
    metrics = {
        "average_rhythm_harmony": 0.0,
        "average_tonal_balance": 0.0,
        "average_emotional_resonance": 0.0,
        "sonic_texture_scores": []
    }
    
    count = 0
    for item in analysis:
        if 'perception' in item and 'audio_features' in item['perception']:
            audio = item['perception']['audio_features']
            metrics['average_rhythm_harmony'] += audio.get('rhythm_harmony', 0)
            metrics['average_tonal_balance'] += audio.get('tonal_balance', 0)
            metrics['average_emotional_resonance'] += audio.get('emotional_resonance', 0)
            metrics['sonic_texture_scores'].append(audio.get('sonic_texture', 0))
            count += 1
    
    if count > 0:
        metrics['average_rhythm_harmony'] /= count
        metrics['average_tonal_balance'] /= count
        metrics['average_emotional_resonance'] /= count
        
    return metrics

def _calculate_avg_steps(analysis):
    """Calculate average number of steps per plan"""
    total_steps = sum(len(item.get('beauty_score', {}).get('aspect_scores', {})) for item in analysis)
    return total_steps / len(analysis) if analysis else 0

def _summarize_aspect_coverage(analysis):
    """Summarize which aspects were analyzed"""
    all_aspects = set()
    for item in analysis:
        aspects = item.get('beauty_score', {}).get('aspect_scores', {}).keys()
        all_aspects.update(aspects)
    return list(all_aspects)

def _calculate_aspect_correlations(analysis):
    """Calculate correlations between different aspects"""
    correlations = {}
    for item in analysis:
        scores = item.get('beauty_score', {}).get('aspect_scores', {})
        for aspect1 in scores:
            for aspect2 in scores:
                if aspect1 < aspect2:  # Avoid duplicates
                    key = f"{aspect1}_vs_{aspect2}"
                    if key not in correlations:
                        correlations[key] = []
                    correlations[key].append((scores[aspect1], scores[aspect2]))
    
    # Calculate correlation coefficients
    result = {}
    for key, values in correlations.items():
        if len(values) > 1:  # Need at least 2 points for correlation
            x = [v[0] for v in values]
            y = [v[1] for v in values]
            correlation = np.corrcoef(x, y)[0, 1]
            result[key] = float(correlation)  # Convert to native Python float
    
    return result

def _summarize_emotional_impact(analysis):
    """Summarize emotional impact metrics"""
    impacts = []
    for item in analysis:
        if 'beauty_score' in item and 'emotional_impact' in item['beauty_score']:
            impacts.append(item['beauty_score']['emotional_impact'])
    
    if not impacts:
        return {}
        
    avg_impact = {}
    for key in impacts[0].keys():
        values = [impact[key] for impact in impacts]
        avg_impact[key] = sum(values) / len(values)
    
    return avg_impact

def _analyze_resonance_patterns(analysis):
    """Build one resonance-pattern record per item with captured elements.

    Each record carries the source audio path, its overall beauty score,
    and a textual pattern summary of its key elements.
    """
    return [
        {
            "audio": item['audio'],
            "beauty_score": item['beauty_score']['overall_score'],
            "resonance_pattern": _summarize_resonance_pattern(item['key_elements']),
        }
        for item in analysis
        if item.get('key_elements')
    ]

def _summarize_resonance_pattern(elements):
    """Summarize resonance pattern based on beauty elements"""
    pattern = ""
    for element in elements:
        pattern += element['type'] + " "
    return pattern.strip()

def _summarize_key_moments(analysis):
    """Build one key-moment record per item with captured elements.

    Each record carries the source audio path, its overall beauty score,
    and a textual summary of its key elements.
    """
    return [
        {
            "audio": item['audio'],
            "beauty_score": item['beauty_score']['overall_score'],
            "key_moment": _summarize_key_moment(item['key_elements']),
        }
        for item in analysis
        if item.get('key_elements')
    ]

def _summarize_key_moment(elements):
    """Summarize key moment based on beauty elements"""
    moment = ""
    for element in elements:
        moment += element['type'] + " "
    return moment.strip()

def _calculate_action_effectiveness(analysis):
    """Calculate effectiveness of actions taken"""
    total_attempts = len(analysis)
    successful_captures = sum(1 for item in analysis if item.get('key_elements'))
    return successful_captures / total_attempts if total_attempts > 0 else 0

# Add other helper functions as needed...

# Script entry point: run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()