from env import World
from agent import Agent
from viz import CognitiveVisualizer
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import cv2
import json
import os
import time

def main():
    """Demo 2: Beauty perception and capture from visual signals.

    Runs each test image through the agent's cognitive pipeline
    (perception -> planning -> reasoning -> action), captures and
    visualizes beauty elements for images scoring above 0.7, then
    saves the accumulated analysis to JSON and prints a summary.
    """
    print("\n=== Initializing Beauty Perception System ===")
    
    # Initialize agent and visualizer.
    # NOTE(review): Agent/CognitiveVisualizer are project-local; the
    # attribute APIs used below (perception_space, planning_space, ...)
    # are assumed from this call site and not verifiable from this file.
    agent = Agent()
    visualizer = CognitiveVisualizer()
    
    # Load and process test images (relative paths; run from the repo root)
    test_images = [
        "data/env.1.jpg",
        "data/env.2.jpg",
        "data/env.3.jpg",
        "data/env.4.jpg"
    ]
    
    # One record per image that passed the capture threshold (see Action Phase)
    beauty_analysis = []
    
    for img_path in test_images:
        print(f"\n{'='*50}")
        print(f"🖼️ Processing image: {img_path}")
        print(f"{'='*50}")
        
        # 1. Perception Phase: extract visual features from the RGB image
        print("\n1️⃣ Perception Phase:")
        image = load_and_preprocess_image(img_path)
        if image is None:
            print(f"❌ Failed to load image: {img_path}")
            continue
            
        perception = agent.perception_space.analyze_visual_signal(image)
        print("Visual Features Detected:")
        print_dict(perception, indent=2)
        
        # 2. Planning Phase: choose which aesthetic aspects to analyze
        print("\n2️⃣ Planning Phase:")
        analysis_plan = agent.planning_space.create_analysis_plan(
            perception,
            aspects=[
                "color_harmony",
                "composition",
                "style_coherence",
                "trend_alignment"
            ]
        )
        print("Analysis Plan Created:")
        print_dict(analysis_plan, indent=2)
        
        # 3. Reasoning Phase: score the image against the plan.
        # Assumed result keys (consumed below and by the final summary):
        # "overall_score", "aspect_scores", "business_impact" — TODO confirm.
        print("\n3️⃣ Reasoning Phase:")
        beauty_score = agent.reasoning_space.evaluate_beauty(
            perception=perception,
            plan=analysis_plan
        )
        print("Beauty Evaluation Results:")
        print_dict(beauty_score, indent=2)
        
        # 4. Action Phase: capture only above the hard-coded 0.7 threshold
        print("\n4️⃣ Action Phase:")
        if beauty_score["overall_score"] > 0.7:
            print("✨ High Beauty Score Detected - Capturing Elements")
            beauty_elements = agent.capture_beauty_elements(
                image=image,
                perception=perception,
                beauty_score=beauty_score
            )
            
            # Record analysis
            # NOTE(review): save_analysis_results' perception metrics look
            # up a 'perception' key on these records, which is never stored
            # here, so those metrics always come out empty/zero — confirm
            # whether 'perception' should be recorded as well.
            analysis = {
                "image": img_path,
                "beauty_score": beauty_score,
                "key_elements": beauty_elements
            }
            beauty_analysis.append(analysis)
            
            # Visualize beauty elements
            visualizer.highlight_beauty_elements(
                image=image,
                elements=beauty_elements,
                scores=beauty_score
            )
            
            print("\nKey Beauty Elements Captured:")
            print_dict(beauty_elements, indent=2)
        else:
            print("⚠️ Beauty score below threshold - Skipping capture")
    
    # After analysis loop, save results
    save_analysis_results(beauty_analysis)
    
    # Display final summary
    print(f"\n{'='*50}")
    print("📊 Final Beauty Analysis Summary")
    print(f"{'='*50}")
    
    for i, analysis in enumerate(beauty_analysis, 1):
        print(f"\n📸 Image {i}: {analysis['image']}")
        print(f"Overall Beauty Score: {analysis['beauty_score']['overall_score']:.2f}")
        print("Aspect Scores:")
        for aspect, score in analysis['beauty_score']['aspect_scores'].items():
            print(f"  • {aspect}: {score:.2f}")
        print("Business Impact:")
        print_dict(analysis['beauty_score']['business_impact'], indent=2)
    
    # Keep visualization open (blocks until figure windows are closed)
    plt.show()

def print_dict(d, indent=0):
    """Recursively pretty-print a nested dict.

    Dicts and lists are labelled with 📌; scalar entries are bulleted,
    with floats rendered to two decimal places. Output goes to stdout.
    """
    pad = " " * indent
    for key, value in d.items():
        if isinstance(value, dict):
            print(f"{pad}📌 {key}:")
            print_dict(value, indent + 2)
        elif isinstance(value, list):
            print(f"{pad}📌 {key}:")
            for item in value:
                if isinstance(item, dict):
                    print_dict(item, indent + 2)
                else:
                    print(f"{pad}  • {item}")
        elif isinstance(value, float):
            print(f"{pad}• {key}: {value:.2f}")
        else:
            print(f"{pad}• {key}: {value}")

def load_and_preprocess_image(path):
    """Load an image from *path* and convert it BGR -> RGB.

    Returns the RGB ndarray on success, or None on any failure; errors
    are reported on stdout rather than raised to the caller.
    """
    try:
        bgr = cv2.imread(path)
        if bgr is None:
            raise FileNotFoundError(f"Could not load image: {path}")
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    except Exception as e:
        # Broad catch is deliberate: the demo skips unreadable images.
        print(f"❌ Error loading image {path}: {str(e)}")
        return None
    print(f"✅ Successfully loaded image: {path}")
    print(f"   Shape: {rgb.shape}")
    return rgb

def generate_recommendations(beauty_elements, threshold=0.6):
    """Generate improvement recommendations from beauty-element scores.

    Args:
        beauty_elements: iterable of dicts with 'score' and 'type' keys.
        threshold: scores strictly below this trigger a recommendation
            (default 0.6, the previously hard-coded cutoff).

    Returns:
        List of human-readable recommendation strings, in input order.
    """
    return [
        f"Consider improving {element['type']}"
        for element in beauty_elements
        if element['score'] < threshold
    ]

def save_analysis_results(beauty_analysis, output_path="results/ai.agent.demo.2.preliminary.results.json"):
    """Save analysis results with detailed metrics from all cognitive spaces.

    Args:
        beauty_analysis: list of per-image records as built by main()
            (keys: "image", "beauty_score", "key_elements").
        output_path: destination JSON file; parent directories are
            created on demand.
    """
    # Create the results directory if needed. os.path.dirname() returns ""
    # for a bare filename, and os.makedirs("") raises FileNotFoundError,
    # so only create directories when there is a directory component.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    
    # Prepare comprehensive results, one section per cognitive space
    results = {
        "metadata": {
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),  # local time
            "total_images": len(beauty_analysis),
            "version": "0.1.0"
        },
        "cognitive_spaces_metrics": {
            "perception_space": {
                "feature_extraction_success_rate": _calculate_perception_success_rate(beauty_analysis),
                "detected_features": _summarize_detected_features(beauty_analysis),
                "visual_quality_metrics": _summarize_visual_metrics(beauty_analysis)
            },
            "planning_space": {
                "plan_completion_rate": _calculate_plan_completion_rate(beauty_analysis),
                "average_steps_per_plan": _calculate_avg_steps(beauty_analysis),
                "aspect_coverage": _summarize_aspect_coverage(beauty_analysis)
            },
            "reasoning_space": {
                "average_confidence": _calculate_avg_confidence(beauty_analysis),
                "aspect_correlations": _calculate_aspect_correlations(beauty_analysis),
                "decision_metrics": _summarize_decision_metrics(beauty_analysis)
            },
            "action_space": {
                "capture_success_rate": _calculate_capture_rate(beauty_analysis),
                "action_effectiveness": _calculate_action_effectiveness(beauty_analysis),
                "intervention_points": _summarize_intervention_points(beauty_analysis)
            }
        },
        "business_metrics": {
            "average_beauty_score": _calculate_avg_beauty_score(beauty_analysis),
            "potential_impact": _summarize_business_impact(beauty_analysis),
            "trend_alignment": _calculate_trend_alignment(beauty_analysis)
        },
        # NOTE(review): records may carry non-JSON-serializable values
        # (e.g. numpy types from the agent); json.dump would raise then.
        "detailed_analysis": beauty_analysis
    }
    
    # Save to file (UTF-8 so non-ASCII survives ensure_ascii=False)
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False)
    print(f"\n💾 Results saved to: {output_path}")

def _calculate_perception_success_rate(analysis):
    """Calculate success rate of feature extraction"""
    total = len(analysis)
    successful = sum(1 for item in analysis if item.get('perception', {}).get('deep_features') is not None)
    return successful / total if total > 0 else 0

def _summarize_detected_features(analysis):
    """Summarize types of features detected across all images"""
    feature_types = set()
    for item in analysis:
        if 'perception' in item:
            feature_types.update(item['perception'].keys())
    return list(feature_types)

def _calculate_plan_completion_rate(analysis):
    """Calculate rate of successful plan execution"""
    total_steps = 0
    completed_steps = 0
    for item in analysis:
        plan = item.get('beauty_score', {}).get('aspect_scores', {})
        total_steps += len(plan)
        completed_steps += sum(1 for score in plan.values() if score > 0)
    return completed_steps / total_steps if total_steps > 0 else 0

def _calculate_avg_confidence(analysis):
    """Calculate average confidence across all evaluations"""
    confidences = [item.get('beauty_score', {}).get('confidence', 0) for item in analysis]
    return sum(confidences) / len(confidences) if confidences else 0

def _calculate_capture_rate(analysis):
    """Calculate success rate of beauty element capture"""
    total = len(analysis)
    successful = sum(1 for item in analysis if item.get('key_elements'))
    return successful / total if total > 0 else 0

def _calculate_avg_beauty_score(analysis):
    """Calculate average beauty score across all images"""
    scores = [item.get('beauty_score', {}).get('overall_score', 0) for item in analysis]
    return sum(scores) / len(scores) if scores else 0

def _summarize_business_impact(analysis):
    """Summarize business impact metrics"""
    impacts = []
    for item in analysis:
        if 'beauty_score' in item and 'business_impact' in item['beauty_score']:
            impacts.append(item['beauty_score']['business_impact'])
    
    if not impacts:
        return {}
        
    avg_impact = {}
    for key in impacts[0].keys():
        values = [impact[key] for impact in impacts]
        avg_impact[key] = sum(values) / len(values)
    
    return avg_impact

def _summarize_visual_metrics(analysis):
    """Summarize visual quality metrics across all images"""
    metrics = {
        "average_edge_density": 0.0,
        "average_contrast": 0.0,
        "average_homogeneity": 0.0,
        "composition_scores": []
    }
    
    count = 0
    for item in analysis:
        if 'perception' in item and 'spatial_features' in item['perception']:
            spatial = item['perception']['spatial_features']
            metrics['average_edge_density'] += spatial.get('edges', 0)
            metrics['average_contrast'] += spatial.get('texture', {}).get('contrast', 0)
            metrics['average_homogeneity'] += spatial.get('texture', {}).get('homogeneity', 0)
            metrics['composition_scores'].append(spatial.get('composition', 0))
            count += 1
    
    if count > 0:
        metrics['average_edge_density'] /= count
        metrics['average_contrast'] /= count
        metrics['average_homogeneity'] /= count
        
    return metrics

def _calculate_avg_steps(analysis):
    """Calculate average number of steps per plan"""
    total_steps = sum(len(item.get('beauty_score', {}).get('aspect_scores', {})) for item in analysis)
    return total_steps / len(analysis) if analysis else 0

def _summarize_aspect_coverage(analysis):
    """Summarize which aspects were analyzed"""
    all_aspects = set()
    for item in analysis:
        aspects = item.get('beauty_score', {}).get('aspect_scores', {}).keys()
        all_aspects.update(aspects)
    return list(all_aspects)

def _calculate_aspect_correlations(analysis):
    """Calculate correlations between different aspects"""
    correlations = {}
    for item in analysis:
        scores = item.get('beauty_score', {}).get('aspect_scores', {})
        for aspect1 in scores:
            for aspect2 in scores:
                if aspect1 < aspect2:  # Avoid duplicates
                    key = f"{aspect1}_vs_{aspect2}"
                    if key not in correlations:
                        correlations[key] = []
                    correlations[key].append((scores[aspect1], scores[aspect2]))
    
    # Calculate correlation coefficients
    result = {}
    for key, values in correlations.items():
        if len(values) > 1:  # Need at least 2 points for correlation
            x = [v[0] for v in values]
            y = [v[1] for v in values]
            correlation = np.corrcoef(x, y)[0, 1]
            result[key] = float(correlation)  # Convert to native Python float
    
    return result

def _summarize_decision_metrics(analysis):
    """Roll up reasoning-space decision statistics into one dict."""
    captures = sum(1 for record in analysis if record.get('key_elements'))
    return {
        "average_confidence": _calculate_avg_confidence(analysis),
        "decisions_made": len(analysis),
        "successful_captures": captures,
    }

def _calculate_action_effectiveness(analysis):
    """Calculate effectiveness of actions taken"""
    total_attempts = len(analysis)
    successful_captures = sum(1 for item in analysis if item.get('key_elements'))
    return successful_captures / total_attempts if total_attempts > 0 else 0

def _summarize_intervention_points(analysis):
    """Summarize points where action was taken"""
    interventions = []
    for item in analysis:
        if item.get('key_elements'):
            interventions.append({
                "image": item['image'],
                "beauty_score": item['beauty_score']['overall_score'],
                "elements_captured": len(item['key_elements'])
            })
    return interventions

def _calculate_trend_alignment(analysis):
    """Calculate alignment with current trends"""
    trend_scores = [
        item.get('beauty_score', {}).get('aspect_scores', {}).get('trend_alignment', 0)
        for item in analysis
    ]
    return sum(trend_scores) / len(trend_scores) if trend_scores else 0

# Add other helper functions as needed...

# Script entry point: run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()