#!/usr/bin/env python3
"""
Data Consistency Check Script for UmaInsight
Identifies and reports synchronization issues between metrics and telemetry_analysis tables

Usage:
    python scripts/check_data_consistency.py [--fix] [--dry-run] [--verbose] [--summary]

Options:
    --fix      Attempt to fix identified inconsistencies (requires confirmation)
    --dry-run  With --fix, show what would be fixed without making changes
    --verbose  Show detailed output for all records
    --summary  Show only summary statistics
"""

import argparse
import asyncio
import json
import sqlite3
import sys
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Tuple, Optional
from dataclasses import dataclass
from contextlib import asynccontextmanager

# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker
from sqlalchemy import select, text, and_, or_
from app.core.config import settings
from app.models.extractor import Metric
from app.models.business import TelemetryAnalysis

@dataclass
class ConsistencyIssue:
    """Represents a single data consistency issue between the two tables.

    Fields prefixed ``metrics_`` come from the metrics table, fields prefixed
    ``analysis_`` from telemetry_analysis; either side may be None when the
    corresponding record or column value is absent.
    """
    metric_id: int                        # primary key of the metrics row
    metric_name: str                      # human-readable metric name
    issue_type: str                       # e.g. MISSING_ANALYSIS_RECORD, INCONSISTENT_STATUS
    metrics_status: Optional[str]         # analysis_status stored on metrics
    analysis_status: Optional[str]        # analysis_status stored on telemetry_analysis
    metrics_analyzed_at: Optional[str]    # stringified timestamp from metrics
    analysis_analyzed_at: Optional[str]   # stringified timestamp from telemetry_analysis
    metrics_analyzed_by: Optional[str]    # author recorded on metrics
    analysis_analyzed_by: Optional[str]   # author recorded on telemetry_analysis
    description: str                      # human-readable summary of the issue

@dataclass
class ConsistencyReport:
    """Summary of consistency check results across all check passes."""
    total_metrics: int                    # row count of the metrics table
    total_analysis_records: int           # row count of telemetry_analysis
    synchronized_records: int             # metrics with a matching, consistent analysis row
    missing_analysis_records: int         # metrics lacking a telemetry_analysis row
    inconsistent_status_records: int      # rows whose analysis_status differs between tables
    inconsistent_timestamp_records: int   # rows whose analyzed_at differs by > 5 minutes
    issues: List[ConsistencyIssue]        # all individual issues, in check order

class DataConsistencyChecker:
    """Checks data consistency between metrics and telemetry_analysis tables.

    Both tables currently live in the same SQLite file (see ``initialize``),
    which is what allows the raw SQL below to join across them from a single
    session. Separate engines are nevertheless kept so the extractor and
    business schemas could later move to different databases.
    """

    def __init__(self):
        # Engines are created lazily in initialize() so constructing the
        # checker performs no I/O.
        self.extractor_engine = None
        self.business_engine = None

    async def initialize(self):
        """Initialize database connections (async engines for both schemas)."""
        # NOTE: both paths point at the same file today; the cross-table
        # joins in the check queries below rely on this.
        extractor_db_path = project_root / "data" / "uma_insight.db"
        business_db_path = project_root / "data" / "uma_insight.db"

        # Create async engines (aiosqlite driver)
        extractor_url = f"sqlite+aiosqlite:///{extractor_db_path}"
        business_url = f"sqlite+aiosqlite:///{business_db_path}"

        self.extractor_engine = create_async_engine(
            extractor_url,
            echo=False,
            future=True
        )
        self.business_engine = create_async_engine(
            business_url,
            echo=False,
            future=True
        )

    async def cleanup(self):
        """Dispose both engines; safe to call even if initialize() never ran."""
        if self.extractor_engine:
            await self.extractor_engine.dispose()
        if self.business_engine:
            await self.business_engine.dispose()

    @asynccontextmanager
    async def get_sessions(self):
        """Yield an (extractor_session, business_session) pair.

        Both sessions are closed automatically when the context exits.
        """
        extractor_sessionmaker = sessionmaker(
            self.extractor_engine, class_=AsyncSession
        )
        business_sessionmaker = sessionmaker(
            self.business_engine, class_=AsyncSession
        )

        async with extractor_sessionmaker() as extractor_session:
            async with business_sessionmaker() as business_session:
                yield extractor_session, business_session

    async def get_overview_stats(self) -> Dict:
        """Return per-table count statistics.

        Returns:
            dict with keys 'metrics' and 'analysis', each a dict of counts
            broken down by analysis_status.
        """
        async with self.get_sessions() as (extractor_db, business_db):
            # Metrics table: bucket rows by their analysis_status value.
            metrics_query = text("""
                SELECT
                    COUNT(*) as total_metrics,
                    COUNT(CASE WHEN analysis_status IS NOT NULL AND analysis_status != 'NOT_ANALYZED' THEN 1 END) as analyzed_metrics,
                    COUNT(CASE WHEN analysis_status = 'NOT_ANALYZED' THEN 1 END) as unanalyzed_metrics,
                    COUNT(CASE WHEN analysis_status = 'ANALYZED' THEN 1 END) as analyzed_count,
                    COUNT(CASE WHEN analysis_status = 'NOT_NEEDED' THEN 1 END) as not_needed_count,
                    COUNT(CASE WHEN analysis_status = 'NEEDS_COLLECTION' THEN 1 END) as needs_collection_count
                FROM metrics
            """)
            metrics_result = await extractor_db.execute(metrics_query)
            metrics_row = metrics_result.fetchone()
            # An aggregate query always yields a row, but fall back to zeros
            # defensively so callers can index the dict unconditionally.
            metrics_stats = dict(metrics_row._mapping) if metrics_row else {
                'total_metrics': 0, 'analyzed_metrics': 0, 'unanalyzed_metrics': 0,
                'analyzed_count': 0, 'not_needed_count': 0, 'needs_collection_count': 0
            }

            # telemetry_analysis table: same breakdown on its own status column.
            analysis_query = text("""
                SELECT
                    COUNT(*) as total_analysis_records,
                    COUNT(CASE WHEN analysis_status = 'ANALYZED' THEN 1 END) as analyzed_count,
                    COUNT(CASE WHEN analysis_status = 'NOT_NEEDED' THEN 1 END) as not_needed_count,
                    COUNT(CASE WHEN analysis_status = 'NEEDS_COLLECTION' THEN 1 END) as needs_collection_count
                FROM telemetry_analysis
            """)
            analysis_result = await business_db.execute(analysis_query)
            analysis_row = analysis_result.fetchone()
            analysis_stats = dict(analysis_row._mapping) if analysis_row else {
                'total_analysis_records': 0, 'analyzed_count': 0, 'not_needed_count': 0, 'needs_collection_count': 0
            }

            return {
                'metrics': metrics_stats,
                'analysis': analysis_stats
            }

    async def check_missing_analysis_records(self) -> List[ConsistencyIssue]:
        """Find metrics that don't have a corresponding telemetry_analysis row.

        Only metrics whose analysis_status indicates work happened (i.e. not
        NULL and not NOT_ANALYZED) are expected to have an analysis record.
        """
        issues = []

        async with self.get_sessions() as (extractor_db, business_db):
            # LEFT JOIN + ta.id IS NULL finds metrics with no matching row.
            # LIMIT caps the report size; more issues may exist beyond it.
            query = text("""
                SELECT m.id, m.name, m.analysis_status, m.analysis_notes, m.analyzed_at, m.analyzed_by
                FROM metrics m
                LEFT JOIN telemetry_analysis ta ON m.id = ta.metric_id
                WHERE m.analysis_status != 'NOT_ANALYZED'
                AND m.analysis_status IS NOT NULL
                AND ta.id IS NULL
                ORDER BY m.id
                LIMIT 1000
            """)

            result = await extractor_db.execute(query)
            for row in result:
                issues.append(ConsistencyIssue(
                    metric_id=row.id,
                    metric_name=row.name,
                    issue_type="MISSING_ANALYSIS_RECORD",
                    metrics_status=row.analysis_status,
                    analysis_status=None,
                    metrics_analyzed_at=str(row.analyzed_at) if row.analyzed_at else None,
                    analysis_analyzed_at=None,
                    metrics_analyzed_by=row.analyzed_by,
                    analysis_analyzed_by=None,
                    description=f"Metric has {row.analysis_status} status but no telemetry_analysis record"
                ))

        return issues

    async def check_inconsistent_status(self) -> List[ConsistencyIssue]:
        """Find records whose analysis_status differs between the two tables."""
        issues = []

        async with self.get_sessions() as (extractor_db, business_db):
            # The two NULL clauses are required because `!=` is NULL-safe in
            # neither direction in SQL (NULL != x evaluates to NULL, not true).
            query = text("""
                SELECT
                    m.id as metric_id,
                    m.name as metric_name,
                    m.analysis_status as metrics_status,
                    m.analyzed_at as metrics_analyzed_at,
                    m.analyzed_by as metrics_analyzed_by,
                    ta.analysis_status as analysis_status,
                    ta.analyzed_at as analysis_analyzed_at,
                    ta.analyzed_by as analysis_analyzed_by
                FROM metrics m
                INNER JOIN telemetry_analysis ta ON m.id = ta.metric_id
                WHERE m.analysis_status != ta.analysis_status
                OR (m.analysis_status IS NULL AND ta.analysis_status IS NOT NULL)
                OR (m.analysis_status IS NOT NULL AND ta.analysis_status IS NULL)
                ORDER BY m.id
                LIMIT 1000
            """)

            result = await extractor_db.execute(query)
            for row in result:
                issues.append(ConsistencyIssue(
                    metric_id=row.metric_id,
                    metric_name=row.metric_name,
                    issue_type="INCONSISTENT_STATUS",
                    metrics_status=row.metrics_status,
                    analysis_status=row.analysis_status,
                    metrics_analyzed_at=str(row.metrics_analyzed_at) if row.metrics_analyzed_at else None,
                    analysis_analyzed_at=str(row.analysis_analyzed_at) if row.analysis_analyzed_at else None,
                    metrics_analyzed_by=row.metrics_analyzed_by,
                    analysis_analyzed_by=row.analysis_analyzed_by,
                    description=f"Status mismatch: metrics={row.metrics_status}, analysis={row.analysis_status}"
                ))

        return issues

    async def check_inconsistent_timestamps(self) -> List[ConsistencyIssue]:
        """Find records whose analyzed_at timestamps differ by more than 5 minutes."""
        issues = []

        async with self.get_sessions() as (extractor_db, business_db):
            # JULIANDAY difference is in days; * 24 * 60 converts to minutes.
            query = text("""
                SELECT
                    m.id as metric_id,
                    m.name as metric_name,
                    m.analysis_status as metrics_status,
                    m.analyzed_at as metrics_analyzed_at,
                    m.analyzed_by as metrics_analyzed_by,
                    ta.analysis_status as analysis_status,
                    ta.analyzed_at as analysis_analyzed_at,
                    ta.analyzed_by as analysis_analyzed_by,
                    ABS(JULIANDAY(m.analyzed_at) - JULIANDAY(ta.analyzed_at)) * 24 * 60 as minutes_diff
                FROM metrics m
                INNER JOIN telemetry_analysis ta ON m.id = ta.metric_id
                WHERE m.analyzed_at IS NOT NULL
                AND ta.analyzed_at IS NOT NULL
                AND ABS(JULIANDAY(m.analyzed_at) - JULIANDAY(ta.analyzed_at)) * 24 * 60 > 5
                ORDER BY minutes_diff DESC
                LIMIT 100
            """)

            result = await extractor_db.execute(query)
            for row in result:
                issues.append(ConsistencyIssue(
                    metric_id=row.metric_id,
                    metric_name=row.metric_name,
                    issue_type="INCONSISTENT_TIMESTAMPS",
                    metrics_status=row.metrics_status,
                    analysis_status=row.analysis_status,
                    metrics_analyzed_at=str(row.metrics_analyzed_at),
                    analysis_analyzed_at=str(row.analysis_analyzed_at),
                    metrics_analyzed_by=row.metrics_analyzed_by,
                    analysis_analyzed_by=row.analysis_analyzed_by,
                    description=f"Timestamps differ by {row.minutes_diff:.1f} minutes"
                ))

        return issues

    async def run_full_check(self) -> ConsistencyReport:
        """Run all consistency checks and aggregate them into a report."""
        print("🔍 Starting comprehensive data consistency check...")

        # Get overview stats
        stats = await self.get_overview_stats()

        # Run all checks
        missing_analysis = await self.check_missing_analysis_records()
        inconsistent_status = await self.check_inconsistent_status()
        inconsistent_timestamps = await self.check_inconsistent_timestamps()

        # Calculate statistics
        total_metrics = stats['metrics']['total_metrics']
        total_analysis_records = stats['analysis']['total_analysis_records']

        # Synchronized records = metrics with matching analysis records.
        # Clamp at zero: the issue lists are LIMIT-capped and independent, so
        # the subtraction could otherwise underflow on a badly skewed dataset.
        # Timestamp drift is reported but not counted against synchronization.
        synchronized_records = max(
            0, total_metrics - len(missing_analysis) - len(inconsistent_status)
        )

        all_issues = missing_analysis + inconsistent_status + inconsistent_timestamps

        return ConsistencyReport(
            total_metrics=total_metrics,
            total_analysis_records=total_analysis_records,
            synchronized_records=synchronized_records,
            missing_analysis_records=len(missing_analysis),
            inconsistent_status_records=len(inconsistent_status),
            inconsistent_timestamp_records=len(inconsistent_timestamps),
            issues=all_issues
        )

    async def fix_missing_analysis_records(self, dry_run: bool = True) -> int:
        """Create telemetry_analysis rows for metrics that lack one.

        Args:
            dry_run: when True, only announce what would happen; no writes.

        Returns:
            Number of analysis records actually created (0 in dry-run mode).
        """
        if dry_run:
            print("🔍 DRY RUN: Would create missing analysis records")
            return 0

        print("🔧 Creating missing analysis records...")
        fixed_count = 0

        async with self.get_sessions() as (extractor_db, business_db):
            # Select exactly the rows check_missing_analysis_records() reports.
            # A LEFT JOIN is used instead of `id NOT IN (SELECT metric_id ...)`
            # because NOT IN matches nothing at all if the subquery yields any
            # NULL metric_id, and the join keeps this query consistent with
            # the checker so the fix targets the same rows the check flagged.
            query = text("""
                SELECT m.id, m.name, m.analysis_status, m.analysis_notes, m.analyzed_at, m.analyzed_by
                FROM metrics m
                LEFT JOIN telemetry_analysis ta ON m.id = ta.metric_id
                WHERE m.analysis_status IS NOT NULL
                AND m.analysis_status != 'NOT_ANALYZED'
                AND ta.id IS NULL
            """)

            result = await extractor_db.execute(query)
            for row in result:
                try:
                    # Create analysis record; hex(randomblob(16)) generates a
                    # random surrogate primary key in SQLite.
                    insert_query = text("""
                        INSERT INTO telemetry_analysis
                        (id, metric_id, analysis_status, analysis_notes, analyzed_by, analyzed_at, created_at, updated_at)
                        VALUES (hex(randomblob(16)), :metric_id, :analysis_status, :analysis_notes,
                                :analyzed_by, :analyzed_at, datetime('now'), datetime('now'))
                    """)

                    await business_db.execute(insert_query, {
                        'metric_id': row.id,
                        'analysis_status': row.analysis_status,
                        'analysis_notes': row.analysis_notes,
                        'analyzed_by': row.analyzed_by,
                        'analyzed_at': row.analyzed_at
                    })

                    fixed_count += 1

                except Exception as e:
                    # Best-effort repair: report the failure and keep going so
                    # one bad row does not abort the whole pass.
                    print(f"❌ Failed to create analysis record for metric {row.id}: {e}")

            await business_db.commit()

        print(f"✅ Created {fixed_count} missing analysis records")
        return fixed_count

    def print_report(self, report: ConsistencyReport, verbose: bool = False, summary_only: bool = False):
        """Print consistency check report to stdout.

        Args:
            report: the aggregated check results.
            verbose: include per-issue detail (capped at 20 issues).
            summary_only: stop after the overview/issue counters.
        """
        print("\n" + "="*80)
        print("📊 DATA CONSISTENCY REPORT")
        print("="*80)

        print(f"\n📈 OVERVIEW STATISTICS:")
        print(f"   Total metrics: {report.total_metrics:,}")
        print(f"   Total analysis records: {report.total_analysis_records:,}")
        print(f"   Synchronized records: {report.synchronized_records:,}")
        # Guard against ZeroDivisionError on an empty metrics table; with no
        # metrics there is nothing to be out of sync, so report 100%.
        sync_rate = (
            report.synchronized_records / report.total_metrics * 100
            if report.total_metrics else 100.0
        )
        print(f"   Synchronization rate: {sync_rate:.1f}%")

        print(f"\n⚠️  ISSUES FOUND:")
        print(f"   Missing analysis records: {report.missing_analysis_records:,}")
        print(f"   Inconsistent status records: {report.inconsistent_status_records:,}")
        print(f"   Inconsistent timestamp records: {report.inconsistent_timestamp_records:,}")
        print(f"   Total issues: {len(report.issues):,}")

        if summary_only:
            return

        if report.issues and verbose:
            print(f"\n🔍 DETAILED ISSUES:")
            for i, issue in enumerate(report.issues[:20], 1):  # Limit to first 20 for readability
                print(f"\n{i}. {issue.issue_type}:")
                print(f"   Metric ID: {issue.metric_id}")
                print(f"   Metric Name: {issue.metric_name}")
                print(f"   Description: {issue.description}")
                if issue.metrics_status != issue.analysis_status:
                    print(f"   Status: metrics={issue.metrics_status} vs analysis={issue.analysis_status}")
                if issue.metrics_analyzed_at != issue.analysis_analyzed_at:
                    print(f"   Analyzed At: metrics={issue.metrics_analyzed_at} vs analysis={issue.analysis_analyzed_at}")

        if not report.issues:
            print(f"\n✅ No data consistency issues found!")
        else:
            print(f"\n❌ {len(report.issues)} data consistency issues detected. Run with --fix to attempt repairs.")

async def main():
    """CLI entry point: parse arguments, run the check, optionally fix.

    Exits with status 1 when any consistency issues were found, else 0.
    """
    arg_parser = argparse.ArgumentParser(description="Check data consistency between metrics and telemetry_analysis tables")
    arg_parser.add_argument("--fix", action="store_true", help="Attempt to fix identified inconsistencies")
    arg_parser.add_argument("--verbose", action="store_true", help="Show detailed output")
    arg_parser.add_argument("--summary", action="store_true", help="Show summary statistics only")
    arg_parser.add_argument("--dry-run", action="store_true", help="Show what would be fixed without making changes")
    options = arg_parser.parse_args()

    checker = DataConsistencyChecker()
    try:
        await checker.initialize()

        # Run the full consistency check and render the results.
        report = await checker.run_full_check()
        checker.print_report(report, verbose=options.verbose, summary_only=options.summary)

        if options.fix:
            if options.dry_run:
                # Preview mode: report intent only, write nothing.
                await checker.fix_missing_analysis_records(dry_run=True)
            else:
                # Destructive path: require explicit confirmation first.
                answer = input(f"\n🔧 This will attempt to fix {len(report.issues)} issues. Continue? (y/N): ")
                if answer.lower() != 'y':
                    print("❌ Fix operation cancelled")
                else:
                    fixed_count = await checker.fix_missing_analysis_records(dry_run=False)
                    print(f"✅ Fixed {fixed_count} issues")
    finally:
        # Always dispose engine connections, even on failure.
        await checker.cleanup()

    # Non-zero exit code signals issues to calling scripts/CI.
    sys.exit(1 if report.issues else 0)

if __name__ == "__main__":
    # Drive the async entry point on a fresh event loop.
    asyncio.run(main())