"""
Simple SQLite database layer for GCR Solver Manager.

This module provides a lightweight database implementation using SQLite
with a single table design for storing build and test run information.
"""

import sqlite3
import json
import logging
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Any
from contextlib import contextmanager

logger = logging.getLogger(__name__)


def _get_local_now() -> datetime:
    """Return the current time as a naive datetime in the local timezone.

    Both the log filenames and the DB ``created_at`` column are derived
    from this helper, so every stored timestamp shares the same
    (naive, local-time) convention. NOTE(review): naive local time means
    DB-side comparisons against SQLite's UTC ``datetime('now')`` would be
    skewed — callers should compute cutoffs in local time too.
    """
    return datetime.now()


class SimpleDatabase:
    """Simple SQLite database with single table for all run data."""
    
    def __init__(self, db_path: str = "gcr_logs.db"):
        """Initialize the database file and create the schema if needed.

        Args:
            db_path: Path to the SQLite database file. The path is
                resolved to an absolute path and missing parent
                directories are created. Defaults to ./gcr_logs.db.
        """
        self.db_path = Path(db_path).resolve()
        # Ensure the directory exists before sqlite3 tries to create the file.
        self.db_path.parent.mkdir(parents=True, exist_ok=True)
        self._init_schema()
        logger.info(f"Database initialized at {self.db_path}")
    
    def _init_schema(self):
        """Create the single ``runs`` table and its indexes if absent.

        Idempotent: every statement uses IF NOT EXISTS, so calling this
        on an already-initialized database is a no-op.
        """
        # Indexes covering the columns used by the query/filter methods.
        index_ddl = (
            'CREATE INDEX IF NOT EXISTS idx_solver ON runs(solver)',
            'CREATE INDEX IF NOT EXISTS idx_created ON runs(created_at)',
            'CREATE INDEX IF NOT EXISTS idx_status ON runs(status)',
            'CREATE INDEX IF NOT EXISTS idx_type ON runs(type)',
        )

        with self._get_connection() as conn:
            conn.execute('''
                CREATE TABLE IF NOT EXISTS runs (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    type TEXT NOT NULL,                    -- 'build' or 'test'
                    solver TEXT NOT NULL,                  -- 'gcr', 'gmres', 'ca-gcr', 'bca-gmres'
                    gpu TEXT,                              -- 'cuda', 'hip', 'cpu', NULL
                    config TEXT,                           -- JSON: build config or test parameters
                    results TEXT,                          -- JSON: extracted norms, metrics, errors
                    status TEXT DEFAULT 'running',         -- 'running', 'success', 'failed'
                    log_path TEXT,                         -- Path to actual log file
                    created_at TIMESTAMP,
                    tags TEXT                              -- Comma-separated tags for filtering
                )
            ''')

            for statement in index_ddl:
                conn.execute(statement)

            conn.commit()
            logger.debug("Database schema initialized")
    
    @contextmanager
    def _get_connection(self):
        """Context manager for database connections."""
        conn = sqlite3.connect(str(self.db_path))
        conn.row_factory = sqlite3.Row  # Enable dict-like access to rows
        try:
            yield conn
        finally:
            conn.close()
    
    def add_build(self, solver: str, gpu: Optional[str], config: Dict[str, Any], 
                  log_content: str, tags: Optional[List[str]] = None) -> int:
        """Record a build run: persist the log to disk and index it in the DB.

        Args:
            solver: Solver name (e.g. 'gcr', 'gmres').
            gpu: GPU backend name ('cuda', 'hip', 'cpu') or None.
            config: Build configuration, stored as JSON in the config column.
            log_content: Raw build log text; written under logs/builds/.
            tags: Optional tags, stored comma-separated for filtering.

        Returns:
            The database row id of the recorded run.
        """
        # Timestamped log filename: <ts>_<solver>[_<gpu>]_build.log
        timestamp = _get_local_now().strftime("%Y%m%d_%H%M%S")
        log_filename = f"{timestamp}_{solver}"
        if gpu:
            log_filename += f"_{gpu}"
        log_filename += "_build.log"

        # NOTE: path is relative to the current working directory.
        log_path = Path("logs/builds") / log_filename
        log_path.parent.mkdir(parents=True, exist_ok=True)
        # Explicit UTF-8: relying on the platform default (e.g. cp1252 on
        # Windows) can raise UnicodeEncodeError on non-ASCII compiler output.
        log_path.write_text(log_content, encoding="utf-8")

        # Heuristic scan of the log to classify the build outcome.
        status = self._extract_build_status(log_content)

        # Index the run in the database.
        with self._get_connection() as conn:
            current_time = _get_local_now().strftime('%Y-%m-%d %H:%M:%S')
            cursor = conn.execute(
                """INSERT INTO runs 
                   (type, solver, gpu, config, status, log_path, tags, created_at) 
                   VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
                ('build', solver, gpu, json.dumps(config), status, 
                 str(log_path), ','.join(tags or []), current_time)
            )
            conn.commit()
            run_id = cursor.lastrowid

        logger.info(f"Build run recorded: ID={run_id}, solver={solver}, gpu={gpu}, status={status}")
        return run_id
    
    def add_test(self, solver: str, gpu: Optional[str], params: Dict[str, Any], 
                 log_content: str, tags: Optional[List[str]] = None) -> int:
        """Record a test run: persist the log, extract norms, index in the DB.

        Args:
            solver: Solver name (e.g. 'gcr', 'gmres').
            gpu: GPU backend name ('cuda', 'hip', 'cpu') or None.
            params: Test parameters, stored as JSON in the config column.
            log_content: Raw test log text; written under logs/tests/.
            tags: Optional tags, stored comma-separated for filtering.

        Returns:
            The database row id of the recorded run.
        """
        # Timestamped log filename: <ts>_<solver>[_<gpu>]_test.log
        timestamp = _get_local_now().strftime("%Y%m%d_%H%M%S")
        log_filename = f"{timestamp}_{solver}"
        if gpu:
            log_filename += f"_{gpu}"
        log_filename += "_test.log"

        # NOTE: path is relative to the current working directory.
        log_path = Path("logs/tests") / log_filename
        log_path.parent.mkdir(parents=True, exist_ok=True)
        # Explicit UTF-8: relying on the platform default (e.g. cp1252 on
        # Windows) can raise UnicodeEncodeError on non-ASCII solver output.
        log_path.write_text(log_content, encoding="utf-8")

        # Heuristic extraction of norms / status / error lines from the log.
        norms = self._extract_norms(log_content)
        status = self._extract_test_status(log_content)

        results = {
            'norms': norms,
            'converged': status == 'success',
            'error_info': self._extract_errors(log_content)
        }

        # Index the run in the database.
        with self._get_connection() as conn:
            current_time = _get_local_now().strftime('%Y-%m-%d %H:%M:%S')
            cursor = conn.execute(
                """INSERT INTO runs 
                   (type, solver, gpu, config, results, status, log_path, tags, created_at) 
                   VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
                ('test', solver, gpu, json.dumps(params), json.dumps(results), 
                 status, str(log_path), ','.join(tags or []), current_time)
            )
            conn.commit()
            run_id = cursor.lastrowid

        logger.info(f"Test run recorded: ID={run_id}, solver={solver}, gpu={gpu}, status={status}")
        return run_id
    
    def _extract_build_status(self, content: str) -> str:
        """Classify a build log as 'failed', 'success', or 'running'.

        Failure markers take precedence over success markers; a log with
        neither is assumed to still be in progress.
        """
        lowered = content.lower()
        failure_markers = ("build failed", "error:")
        success_markers = ("build succeeded", "finished")

        if any(marker in lowered for marker in failure_markers):
            return "failed"
        if any(marker in lowered for marker in success_markers):
            return "success"
        return "running"
    
    def _extract_test_status(self, content: str) -> str:
        """Classify a test log as 'success', 'failed', or 'running'.

        A log counts as success only when it mentions convergence and
        never mentions 'failed'; otherwise any 'failed'/'error' mention
        means failure, and anything else is treated as still running.
        """
        lowered = content.lower()
        converged = "converged" in lowered
        mentions_failed = "failed" in lowered
        mentions_error = "error" in lowered

        if converged and not mentions_failed:
            return "success"
        if mentions_failed or mentions_error:
            return "failed"
        return "running"
    
    def _extract_norms(self, content: str) -> Dict[str, float]:
        """Extract named norm values from solver log content.

        Recognizes two line formats (lines must mention 'norm' or
        'residual'):
          * ``residual_norm = 1.234e-5`` — key is the last word before '='
          * ``Final norm: 2.345e-8``     — key is the lowercased,
            underscore-joined text before ':'

        Lines that do not parse cleanly are skipped.

        Returns:
            Mapping of norm name -> float value.
        """
        norms: Dict[str, float] = {}

        for line in content.splitlines():
            line = line.strip()
            if not line:
                continue

            # Only lines mentioning a norm/residual are candidates.
            lowered = line.lower()
            if 'norm' not in lowered and 'residual' not in lowered:
                continue

            # Parse "key = value" format, e.g. "residual_norm = 1.234e-5".
            if '=' in line:
                parts = line.split('=')
                if len(parts) == 2:
                    # Bug fix: key extraction now lives inside the try, so a
                    # line with an empty left side (e.g. "= residual 2.0") is
                    # skipped instead of raising an uncaught IndexError.
                    try:
                        key = parts[0].strip().split()[-1]
                        value = float(parts[1].strip().split()[0])
                        norms[key] = value
                    except (ValueError, IndexError):
                        continue

            # Parse "Label: value" format, e.g. "Final norm: 1.234e-5".
            elif ':' in line:
                parts = line.split(':')
                if len(parts) == 2:
                    key = parts[0].strip().lower().replace(' ', '_')
                    try:
                        value = float(parts[1].strip().split()[0])
                        norms[key] = value
                    except (ValueError, IndexError):
                        continue

        return norms
    
    def _extract_errors(self, content: str) -> List[str]:
        """Collect up to the first five log lines mentioning 'error' or 'failed'."""
        flagged = [
            stripped
            for stripped in (raw.strip() for raw in content.splitlines())
            if 'error' in stripped.lower() or 'failed' in stripped.lower()
        ]
        # Cap the list so a noisy log doesn't bloat the stored results JSON.
        return flagged[:5]
    
    def get_latest(self, n: int = 10, run_type: Optional[str] = None, 
                   solver: Optional[str] = None, gpu: Optional[str] = None) -> List[Dict[str, Any]]:
        """Get latest runs with optional filters."""
        query = "SELECT * FROM runs WHERE 1=1"
        params = []
        
        if run_type:
            query += " AND type = ?"
            params.append(run_type)
        if solver:
            query += " AND solver = ?"
            params.append(solver)
        if gpu:
            query += " AND gpu = ?"
            params.append(gpu)
        
        query += " ORDER BY created_at DESC LIMIT ?"
        params.append(n)
        
        with self._get_connection() as conn:
            cursor = conn.execute(query, params)
            rows = cursor.fetchall()
        
        # Convert to list of dicts with JSON parsing
        results = []
        for row in rows:
            result = dict(row)
            if result['config']:
                result['config'] = json.loads(result['config'])
            if result['results']:
                result['results'] = json.loads(result['results'])
            if result['tags']:
                result['tags'] = result['tags'].split(',')
            else:
                result['tags'] = []
            results.append(result)
        
        return results
    
    def get_by_id(self, run_id: int) -> Optional[Dict[str, Any]]:
        """Get a specific run by ID."""
        with self._get_connection() as conn:
            cursor = conn.execute("SELECT * FROM runs WHERE id = ?", (run_id,))
            row = cursor.fetchone()
        
        if not row:
            return None
        
        result = dict(row)
        if result['config']:
            result['config'] = json.loads(result['config'])
        if result['results']:
            result['results'] = json.loads(result['results'])
        if result['tags']:
            result['tags'] = result['tags'].split(',')
        else:
            result['tags'] = []
        
        return result
    
    def compare_norms(self, id1: int, id2: int) -> Optional[Dict[str, Any]]:
        """Compare norms between two test runs."""
        with self._get_connection() as conn:
            cursor = conn.execute(
                "SELECT id, results FROM runs WHERE id IN (?, ?) AND type = 'test'",
                (id1, id2)
            )
            rows = cursor.fetchall()
        
        if len(rows) != 2:
            logger.warning(f"Could not find both test runs {id1} and {id2}")
            return None
        
        # Parse results
        results1 = json.loads(rows[0]['results']) if rows[0]['results'] else {}
        results2 = json.loads(rows[1]['results']) if rows[1]['results'] else {}
        
        norms1 = results1.get('norms', {})
        norms2 = results2.get('norms', {})
        
        # Compare common norms
        comparison = {}
        common_keys = set(norms1.keys()) & set(norms2.keys())
        
        for key in common_keys:
            val1, val2 = norms1[key], norms2[key]
            diff = abs(val1 - val2)
            max_val = max(abs(val1), abs(val2), 1e-15)
            diff_pct = (diff / max_val) * 100
            
            comparison[key] = {
                'run1': val1,
                'run2': val2,
                'difference': diff,
                'difference_percent': diff_pct,
                'relative_error': diff / max_val
            }
        
        return {
            'run1_id': id1,
            'run2_id': id2,
            'comparisons': comparison,
            'summary': {
                'total_norms': len(common_keys),
                'max_difference_percent': max([c['difference_percent'] for c in comparison.values()]) if comparison else 0,
                'avg_difference_percent': sum([c['difference_percent'] for c in comparison.values()]) / len(comparison) if comparison else 0
            }
        }
    
    def search_runs(self, query: str, run_type: Optional[str] = None) -> List[Dict[str, Any]]:
        """Search runs by text query in config, results, or log path."""
        sql_query = """
        SELECT * FROM runs WHERE 1=1
        AND (config LIKE ? OR results LIKE ? OR log_path LIKE ? OR tags LIKE ?)
        """
        params = [f'%{query}%'] * 4
        
        if run_type:
            sql_query += " AND type = ?"
            params.append(run_type)
        
        sql_query += " ORDER BY created_at DESC LIMIT 50"
        
        with self._get_connection() as conn:
            cursor = conn.execute(sql_query, params)
            rows = cursor.fetchall()
        
        results = []
        for row in rows:
            result = dict(row)
            if result['config']:
                result['config'] = json.loads(result['config'])
            if result['results']:
                result['results'] = json.loads(result['results'])
            if result['tags']:
                result['tags'] = result['tags'].split(',')
            else:
                result['tags'] = []
            results.append(result)
        
        return results
    
    def get_statistics(self) -> Dict[str, Any]:
        """Get database statistics."""
        with self._get_connection() as conn:
            cursor = conn.execute("""
                SELECT 
                    COUNT(*) as total_runs,
                    SUM(CASE WHEN type='build' THEN 1 ELSE 0 END) as build_runs,
                    SUM(CASE WHEN type='test' THEN 1 ELSE 0 END) as test_runs,
                    SUM(CASE WHEN status='success' THEN 1 ELSE 0 END) as successful_runs,
                    SUM(CASE WHEN status='failed' THEN 1 ELSE 0 END) as failed_runs
                FROM runs
            """)
            stats = dict(cursor.fetchone())
            
            # Get solver breakdown
            cursor = conn.execute("""
                SELECT solver, COUNT(*) as count 
                FROM runs 
                GROUP BY solver 
                ORDER BY count DESC
            """)
            solver_stats = [dict(row) for row in cursor.fetchall()]
            
            # Get recent activity (last 7 days)
            cursor = conn.execute("""
                SELECT COUNT(*) as recent_runs
                FROM runs
                WHERE created_at >= datetime('now', '-7 days')
            """)
            recent_stats = dict(cursor.fetchone())
        
        return {
            'overview': stats,
            'solvers': solver_stats,
            'recent_activity': recent_stats,
            'database_path': str(self.db_path)
        }
    
    def cleanup_old_runs(self, days: int = 90) -> int:
        """Remove runs older than specified days. Returns number of deleted runs."""
        with self._get_connection() as conn:
            cursor = conn.execute("""
                SELECT COUNT(*) as count
                FROM runs 
                WHERE created_at < datetime('now', '-{} days')
            """.format(days))
            count = cursor.fetchone()[0]
            
            if count > 0:
                conn.execute("""
                    DELETE FROM runs 
                    WHERE created_at < datetime('now', '-{} days')
                """.format(days))
                conn.commit()
                logger.info(f"Cleaned up {count} runs older than {days} days")
        
        return count