#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Smart Token Expander with Advanced Controls

This module provides an intelligent token expansion system that can handle
large-scale expansions while providing monitoring and control mechanisms.

Copyright 2024 Chromium Authors
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
"""

import sqlite3
import re
import json
import logging
import os
from typing import Dict, List, Optional, Tuple, Set
from pathlib import Path
from dataclasses import dataclass
from datetime import datetime


# Configure logging
# Module-wide default level is INFO; main() raises the root logger to
# DEBUG when --verbose is passed on the command line.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


@dataclass
class ExpansionStats:
    """Aggregate statistics for one token-expansion run.

    Persisted to the `expansion_statistics` table by
    SmartTokenExpander.expand_all_histograms() and reloaded from the most
    recent row when the script runs with --report-only.
    """
    total_histograms: int = 0        # All metrics of type 'histogram' in the DB.
    histograms_with_tokens: int = 0  # Histograms whose name contains '{'.
    total_expansions: int = 0        # Expanded names generated across the run.
    truncated_histograms: int = 0    # Histograms whose expansion hit the cap.
    max_expansions_single: int = 0   # Largest expansion count of any one histogram.
    processing_time: float = 0.0     # Wall-clock run time in seconds.
    memory_usage_mb: float = 0.0     # Rough estimate (0.5 KB per expansion).


class SmartTokenExpander:
    """Expand `{Token}` placeholders in histogram names into concrete names.

    Token/variant definitions are read from a SQLite database; expanded
    names plus per-histogram metadata and run statistics are written back.
    Two hard caps keep pathological token combinations from exhausting
    memory or bloating the database: a per-histogram cap and a global
    budget across the whole run.
    """

    def __init__(self, db_path: str, max_expansions_per_histogram: int = 10000,
                 max_total_expansions: int = 100000):
        """Initialize the expander.

        Args:
            db_path: Path to the SQLite database file.
            max_expansions_per_histogram: Hard cap on the number of expansions
                generated for a single histogram; surplus combinations are
                dropped and the histogram is flagged as truncated.
            max_total_expansions: Hard cap on expansions across the whole run;
                once the budget is spent, remaining histograms are skipped.
        """
        self.db_path = db_path
        self.conn: Optional[sqlite3.Connection] = None
        self.max_expansions_per_histogram = max_expansions_per_histogram
        self.max_total_expansions = max_total_expansions
        self.expansion_stats = ExpansionStats()
        self.warning_threshold = 1000  # Warn if expansions exceed this
        self.critical_threshold = 5000  # Critical if expansions exceed this

    def connect(self):
        """Open the database connection; rows become addressable by column name."""
        self.conn = sqlite3.connect(self.db_path)
        self.conn.row_factory = sqlite3.Row

    def close(self):
        """Close the database connection if one is open."""
        if self.conn:
            self.conn.close()

    def create_expansion_tables(self):
        """Create (if absent) the output tables and their indexes."""
        cursor = self.conn.cursor()

        # Main expanded histograms table
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS expanded_histograms (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                original_metric_id INTEGER NOT NULL,
                expanded_name TEXT NOT NULL,
                token_values TEXT,  -- JSON string of token key-value pairs
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (original_metric_id) REFERENCES metrics (id) ON DELETE CASCADE
            )
        ''')

        # Per-histogram expansion metadata table
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS expansion_metadata (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                metric_id INTEGER NOT NULL,
                original_name TEXT NOT NULL,
                expansion_count INTEGER NOT NULL,
                was_truncated BOOLEAN DEFAULT FALSE,
                max_allowed INTEGER NOT NULL,
                processing_time_ms REAL NOT NULL,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (metric_id) REFERENCES metrics (id) ON DELETE CASCADE
            )
        ''')

        # Whole-run statistics table (one row per run; latest row is what
        # --report-only reloads).
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS expansion_statistics (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                total_histograms INTEGER NOT NULL,
                histograms_with_tokens INTEGER NOT NULL,
                total_expansions INTEGER NOT NULL,
                truncated_histograms INTEGER NOT NULL,
                max_expansions_single INTEGER NOT NULL,
                processing_time_seconds REAL NOT NULL,
                memory_usage_mb REAL NOT NULL,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')

        # Indexes for lookup by expanded name and by source metric.
        cursor.execute('''
            CREATE INDEX IF NOT EXISTS idx_expanded_histograms_name
            ON expanded_histograms(expanded_name)
        ''')
        cursor.execute('''
            CREATE INDEX IF NOT EXISTS idx_expanded_histograms_original_id
            ON expanded_histograms(original_metric_id)
        ''')

        self.conn.commit()
        logger.info("Created smart expansion tables with metadata")

    def extract_tokens_from_name(self, name: str) -> List[str]:
        """Return the unique `{Token}` placeholders found in *name*.

        First-appearance order is preserved (dict.fromkeys) so expansion
        output is deterministic across runs; the previous list(set(...))
        form varied with string-hash randomization, which changed which
        combinations survived truncation from run to run.
        """
        tokens = re.findall(r'\{([^}]+)\}', name)
        return list(dict.fromkeys(tokens))

    def analyze_potential_expansions(self, metric_id: int, name: str) -> Tuple[int, List[str]]:
        """Estimate the expansion count for a histogram before expanding it.

        Returns:
            (total_combinations, problematic_tokens): the cartesian-product
            size, and a list describing tokens whose variant count exceeds
            the warning threshold.  Tokens with no variants in the database
            count as a single literal combination.
        """
        tokens = self.extract_tokens_from_name(name)
        if not tokens:
            return 1, []

        cursor = self.conn.cursor()

        # Map token key (as written in the name) -> canonical token name.
        cursor.execute("""
            SELECT ht.token_key, ht.token_name
            FROM histogram_tokens ht
            WHERE ht.metric_id = ?
        """, (metric_id,))
        token_map = {row['token_key']: row['token_name'] for row in cursor.fetchall()}

        total_combinations = 1
        problematic_tokens = []

        for token_key, token_name in token_map.items():
            cursor.execute("""
                SELECT COUNT(*) as count
                FROM variants v
                JOIN tokens t ON v.token_id = t.id
                WHERE t.token_name = ?
            """, (token_name,))
            # A token with no variants is substituted literally -> 1 combination.
            variant_count = cursor.fetchone()[0] or 1
            total_combinations *= variant_count

            # Flag potentially problematic tokens.
            if variant_count > self.warning_threshold:
                problematic_tokens.append(f"{token_key}({variant_count} variants)")

        return total_combinations, problematic_tokens

    def expand_single_histogram_smart(self, metric_id: int, name: str) -> Tuple[List[Tuple[str, Dict[str, str]]], bool, Dict]:
        """Expand one histogram, capped at max_expansions_per_histogram.

        Returns:
            (expansions, was_truncated, metadata): expansions is a list of
            (expanded_name, token_values) pairs; metadata always carries the
            keys 'potential', 'actual', 'truncated', 'max_allowed',
            'processing_time_ms', 'problematic_tokens', 'variant_details'.
            Every returned combination has ALL tokens substituted, even when
            the cap forces truncation.
        """
        start_time = datetime.now()

        # Estimate first so oversized histograms can be logged up front.
        potential_count, problematic_tokens = self.analyze_potential_expansions(metric_id, name)

        if potential_count > self.critical_threshold:
            logger.warning("CRITICAL: %s has %d potential expansions - will be truncated to %d",
                           name, potential_count, self.max_expansions_per_histogram)
        elif potential_count > self.warning_threshold:
            logger.warning("WARNING: %s has %d potential expansions", name, potential_count)

        tokens = self.extract_tokens_from_name(name)
        if not tokens:
            # Names can match the caller's LIKE '%{%' filter without having a
            # complete {Token} (e.g. "Foo{bar").  Include every key that
            # expand_all_histograms() reads, so this path cannot raise
            # KeyError on metadata['max_allowed'] as the old code did.
            elapsed_ms = (datetime.now() - start_time).total_seconds() * 1000
            return [(name, {})], False, {
                "potential": 1, "actual": 1, "truncated": False,
                "max_allowed": 1, "processing_time_ms": elapsed_ms,
                "problematic_tokens": [], "variant_details": {},
            }

        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT ht.token_key, ht.token_name
            FROM histogram_tokens ht
            WHERE ht.metric_id = ?
        """, (metric_id,))
        token_map = {row['token_key']: row['token_name'] for row in cursor.fetchall()}

        # Fetch every token's variants up front.
        all_variants: Dict[str, List[str]] = {}
        for token_key, token_name in token_map.items():
            cursor.execute("""
                SELECT v.variant_name
                FROM variants v
                JOIN tokens t ON v.token_id = t.id
                WHERE t.token_name = ?
            """, (token_name,))
            variants = [row['variant_name'] for row in cursor.fetchall()]
            if not variants:
                variants = [f'{{{token_key}}}']  # Treat as literal if no variants found
            all_variants[token_key] = variants

        cap = self.max_expansions_per_histogram
        max_allowed = min(potential_count, cap)
        was_truncated = False

        # Build combinations token by token, capping the running list at
        # `cap` on every level.  Unlike the previous implementation -- whose
        # sticky `if was_truncated: break` collapsed the list to a handful of
        # combinations and left later tokens unassigned once truncation had
        # occurred -- every surviving combination here ends up with ALL
        # tokens assigned.
        current_combinations: List[Dict[str, str]] = [{}]
        for token in tokens:
            variants = all_variants.get(token, [f'{{{token}}}'])
            new_combinations: List[Dict[str, str]] = []
            for base_combo in current_combinations:
                if len(new_combinations) >= cap:
                    was_truncated = True  # more bases remained -> output was cut
                    break
                for variant in variants:
                    if len(new_combinations) >= cap:
                        was_truncated = True  # more variants remained -> cut
                        break
                    combo = dict(base_combo)
                    combo[token] = variant
                    new_combinations.append(combo)
            current_combinations = new_combinations

            # Rough memory-pressure heuristic (~100 bytes per combination).
            if len(current_combinations) * 100 > 50000:
                logger.warning("High memory usage detected for %s: %d combinations",
                               name, len(current_combinations))

        if was_truncated:
            logger.warning("Truncated expansion for %s at %d combinations",
                           name, len(current_combinations))

        # Substitute each combination's values into the name template.
        expansions = []
        for token_values in current_combinations:
            expanded_name = name
            for token_key, token_value in token_values.items():
                expanded_name = expanded_name.replace(f'{{{token_key}}}', token_value)
            expansions.append((expanded_name, token_values))

        processing_time = (datetime.now() - start_time).total_seconds() * 1000

        metadata = {
            "potential": potential_count,
            "actual": len(expansions),
            "truncated": was_truncated,
            "max_allowed": max_allowed,
            "processing_time_ms": processing_time,
            "problematic_tokens": problematic_tokens,
            "variant_details": {k: all_variants.get(k, []) for k in tokens},
        }

        return expansions, was_truncated, metadata

    def expand_all_histograms(self):
        """Expand every tokenized histogram and persist results + statistics.

        Clears previous output rows first, then processes every histogram
        whose name contains '{'.  Both caps are enforced: the per-histogram
        cap inside expand_single_histogram_smart(), and the global
        max_total_expansions budget here (the previous version accepted the
        parameter but never applied it).
        """
        cursor = self.conn.cursor()
        start_time = datetime.now()

        # Get all histograms with tokens
        cursor.execute("""
            SELECT id, name FROM metrics
            WHERE name LIKE '%{%' AND type = 'histogram'
        """)
        histograms_with_tokens = cursor.fetchall()
        total_expansions = 0
        truncated_histograms = 0
        max_expansions_single = 0

        logger.info("Found %d histograms with tokens", len(histograms_with_tokens))

        # Clear previous expansions so reruns do not accumulate stale rows.
        cursor.execute("DELETE FROM expanded_histograms")
        cursor.execute("DELETE FROM expansion_metadata")
        self.conn.commit()

        for hist in histograms_with_tokens:
            # Enforce the global budget before doing any per-histogram work.
            remaining_budget = self.max_total_expansions - total_expansions
            if remaining_budget <= 0:
                logger.warning("Reached max_total_expansions (%d); skipping remaining histograms",
                               self.max_total_expansions)
                break

            metric_id = hist['id']
            name = hist['name']

            expansions, was_truncated, metadata = self.expand_single_histogram_smart(metric_id, name)

            # Trim this histogram's output if it would overshoot the budget.
            if len(expansions) > remaining_budget:
                expansions = expansions[:remaining_budget]
                was_truncated = True

            # Store expansions
            if expansions:
                cursor.executemany("""
                    INSERT INTO expanded_histograms
                    (original_metric_id, expanded_name, token_values)
                    VALUES (?, ?, ?)
                """, [(metric_id, expanded_name, json.dumps(token_values))
                      for expanded_name, token_values in expansions])

            # Store metadata
            cursor.execute("""
                INSERT INTO expansion_metadata
                (metric_id, original_name, expansion_count, was_truncated,
                 max_allowed, processing_time_ms)
                VALUES (?, ?, ?, ?, ?, ?)
            """, (metric_id, name, len(expansions), was_truncated,
                  metadata['max_allowed'], metadata['processing_time_ms']))

            total_expansions += len(expansions)
            if was_truncated:
                truncated_histograms += 1
            max_expansions_single = max(max_expansions_single, len(expansions))

            # Log progress for unusually large or truncated histograms.
            if len(expansions) > 1000 or was_truncated:
                status = "TRUNCATED" if was_truncated else "LARGE"
                logger.info("%s: %s -> %d expansions (was: %d)",
                            status, name, len(expansions), metadata['potential'])

        # Calculate final statistics.
        processing_time = (datetime.now() - start_time).total_seconds()

        # Get memory usage estimate (rough approximation: 0.5KB per expansion).
        memory_usage_mb = total_expansions * 0.0005

        histogram_count = cursor.execute(
            "SELECT COUNT(*) FROM metrics WHERE type = 'histogram'").fetchone()[0]

        self.expansion_stats = ExpansionStats(
            total_histograms=histogram_count,
            histograms_with_tokens=len(histograms_with_tokens),
            total_expansions=total_expansions,
            truncated_histograms=truncated_histograms,
            max_expansions_single=max_expansions_single,
            processing_time=processing_time,
            memory_usage_mb=memory_usage_mb
        )

        # Store statistics
        cursor.execute("""
            INSERT INTO expansion_statistics
            (total_histograms, histograms_with_tokens, total_expansions,
             truncated_histograms, max_expansions_single, processing_time_seconds,
             memory_usage_mb)
            VALUES (?, ?, ?, ?, ?, ?, ?)
        """, (self.expansion_stats.total_histograms,
              self.expansion_stats.histograms_with_tokens,
              self.expansion_stats.total_expansions,
              self.expansion_stats.truncated_histograms,
              self.expansion_stats.max_expansions_single,
              self.expansion_stats.processing_time,
              self.expansion_stats.memory_usage_mb))

        self.conn.commit()

        # Log final results
        logger.info("Expansion completed:")
        logger.info(f"  Total histograms: {self.expansion_stats.total_histograms}")
        logger.info(f"  Histograms with tokens: {self.expansion_stats.histograms_with_tokens}")
        logger.info(f"  Total expansions: {self.expansion_stats.total_expansions:,}")
        logger.info(f"  Truncated histograms: {self.expansion_stats.truncated_histograms}")
        logger.info(f"  Max expansions per histogram: {self.expansion_stats.max_expansions_single}")
        logger.info(f"  Processing time: {self.expansion_stats.processing_time:.2f}s")
        logger.info(f"  Estimated memory usage: {self.expansion_stats.memory_usage_mb:.2f}MB")

    def get_problematic_histograms(self) -> List[Dict]:
        """Return metadata for truncated histograms, largest expansion first."""
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT original_name, expansion_count, was_truncated, max_allowed, processing_time_ms
            FROM expansion_metadata
            WHERE was_truncated = TRUE
            ORDER BY expansion_count DESC
        """)

        results = []
        for row in cursor.fetchall():
            results.append({
                'name': row['original_name'],
                'expansion_count': row['expansion_count'],
                'max_allowed': row['max_allowed'],
                'processing_time_ms': row['processing_time_ms'],
                # Guard against max_allowed == 0 so the report cannot crash
                # on a ZeroDivisionError.
                'truncation_ratio': row['expansion_count'] / max(row['max_allowed'], 1)
            })

        return results

    def get_largest_histograms(self, limit: int = 10) -> List[Dict]:
        """Return the *limit* histograms with the most expansions, descending."""
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT original_name, expansion_count, was_truncated
            FROM expansion_metadata
            ORDER BY expansion_count DESC
            LIMIT ?
        """, (limit,))

        return [{
            'name': row['original_name'],
            'expansion_count': row['expansion_count'],
            'was_truncated': row['was_truncated']
        } for row in cursor.fetchall()]

    def print_summary_report(self):
        """Print a human-readable report from self.expansion_stats and the
        metadata tables (safe to call after --report-only reload)."""
        print("\n" + "="*60)
        print("📊 SMART TOKEN EXPANSION REPORT")
        print("="*60)

        stats = self.expansion_stats
        print(f"\n📈 Overall Statistics:")
        print(f"  Total histograms: {stats.total_histograms:,}")
        print(f"  Histograms with tokens: {stats.histograms_with_tokens}")
        print(f"  Total expansions generated: {stats.total_expansions:,}")
        print(f"  Histograms truncated: {stats.truncated_histograms}")
        print(f"  Max expansions per histogram: {stats.max_expansions_single:,}")
        print(f"  Processing time: {stats.processing_time:.2f} seconds")
        print(f"  Estimated memory usage: {stats.memory_usage_mb:.2f} MB")

        if stats.histograms_with_tokens > 0:
            print(f"  Average expansions per token histogram: {stats.total_expansions/stats.histograms_with_tokens:.1f}")

        if stats.truncated_histograms > 0:
            print(f"\n⚠️  TRUNCATION WARNINGS:")
            print(f"  {stats.truncated_histograms} histograms were truncated")
            print(f"  This is normal for very complex token combinations")

            # Show top problematic histograms
            problematic = self.get_problematic_histograms()[:3]
            print(f"\n  Top problematic histograms:")
            for i, hist in enumerate(problematic, 1):
                print(f"    {i}. {hist['name']}")
                print(f"       Expansions: {hist['expansion_count']:,} (limit: {hist['max_allowed']:,})")
                print(f"       Truncation ratio: {hist['truncation_ratio']:.1f}x")

        print(f"\n🏆 TOP 10 LARGEST EXPANSIONS:")
        largest = self.get_largest_histograms(10)
        for i, hist in enumerate(largest, 1):
            status = "🚨" if hist['was_truncated'] else "✅"
            print(f"    {i:2d}. {status} {hist['expansion_count']:6,} expansions - {hist['name']}")

        print("\n💡 Recommendations:")
        if stats.truncated_histograms > 0:
            print("  • Consider increasing --max-expansions-per-histogram if needed")
            print("  • Review histograms with very large token combinations")
            print("  • Some truncation is normal for complex metrics")
        else:
            print("  • All expansions completed successfully")
            print("  • No truncation required")

        print("\n" + "="*60)


def _load_latest_stats(conn):
    """Return the most recently stored ExpansionStats row, or None if absent."""
    cursor = conn.cursor()
    cursor.execute("""
        SELECT * FROM expansion_statistics
        ORDER BY created_at DESC
        LIMIT 1
    """)
    row = cursor.fetchone()
    if not row:
        return None
    return ExpansionStats(
        total_histograms=row['total_histograms'],
        histograms_with_tokens=row['histograms_with_tokens'],
        total_expansions=row['total_expansions'],
        truncated_histograms=row['truncated_histograms'],
        max_expansions_single=row['max_expansions_single'],
        processing_time=row['processing_time_seconds'],
        memory_usage_mb=row['memory_usage_mb'],
    )


def main():
    """Command-line entry point: set up tables, expand (or just report)."""
    import argparse

    parser = argparse.ArgumentParser(description='Smart token expansion with controls')
    parser.add_argument('--db', default='./uma_ukm_metrics.db',
                        help='Database file path')
    parser.add_argument('--max-expansions-per-histogram', type=int, default=10000,
                        help='Maximum expansions per histogram (default: 10000)')
    parser.add_argument('--max-total-expansions', type=int, default=100000,
                        help='Maximum total expansions (default: 100000)')
    parser.add_argument('--verbose', action='store_true',
                        help='Enable verbose logging')
    parser.add_argument('--report-only', action='store_true',
                        help='Only show report, do not expand')
    options = parser.parse_args()

    if options.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    expander = SmartTokenExpander(
        options.db,
        options.max_expansions_per_histogram,
        options.max_total_expansions,
    )
    expander.connect()

    try:
        expander.create_expansion_tables()

        if options.report_only:
            # Reload the statistics from the last run instead of expanding.
            stored = _load_latest_stats(expander.conn)
            if stored is not None:
                expander.expansion_stats = stored
        else:
            print("🚀 Starting smart token expansion...")
            expander.expand_all_histograms()

        # Print comprehensive report
        expander.print_summary_report()

    except Exception as e:
        # Top-level boundary: log and show the traceback, then fall through
        # to cleanup without re-raising.
        logger.error(f"Smart expansion failed: {e}")
        import traceback
        traceback.print_exc()

    finally:
        expander.close()


if __name__ == '__main__':
    main()