"""
Data Flow Coordination Layer
Orchestrates the complete pipeline: ArXiv Scraper → Database → AI Classification → User Recommendations
"""

import asyncio
import json
import logging
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, List, Optional

from config import get_config
from services.database_manager import DatabaseManager
from services.paper_manager import PaperManager

# Import service components
try:
    from arxiv_scraper_service.core.scraper_service import ArxivScraperService
    from arxiv_scraper_service.core.config_manager import ConfigManager as ScraperConfigManager
    scraper_available = True
except ImportError:
    scraper_available = False

try:
    from ai_classification_engine.core.classification_engine import AIClassificationEngine
    from ai_classification_engine.core.recommendation_engine import PersonalizedRecommendationEngine
    ai_available = True
except ImportError:
    ai_available = False

logger = logging.getLogger(__name__)

@dataclass
class PipelineMetrics:
    """Execution metrics for one daily pipeline run.

    Counters default to zero; ``errors`` accumulates human-readable
    stage-failure messages appended during the run.
    """
    papers_scraped: int = 0
    papers_classified: int = 0
    papers_stored: int = 0
    users_updated: int = 0
    processing_time_seconds: float = 0.0
    # default_factory gives every instance its own list; the previous
    # ``errors: List[str] = None`` default contradicted the annotation and
    # relied entirely on __post_init__ to repair it.
    errors: Optional[List[str]] = field(default_factory=list)

    def __post_init__(self):
        # Normalize an explicitly passed ``errors=None`` so existing callers
        # that relied on the old None default keep working.
        if self.errors is None:
            self.errors = []

class DataPipeline:
    """
    Coordinates the complete data flow pipeline for the ArXiv subscription platform.

    Stages (executed in order by run_daily_pipeline):
        1. Scrape new papers from ArXiv.
        2. Store them in the database, deduplicating by arxiv_id.
        3. Classify stored papers with the AI engine (feature-gated).
        4. Regenerate recommendations for recently active users.
        5. Trigger email notifications (feature-gated; currently a placeholder).

    Every stage is best-effort: a missing optional service or a per-item
    failure is logged and skipped rather than aborting the whole run.
    """
    
    def __init__(self) -> None:
        """Wire up core components and attempt to initialize optional services."""
        self.config = get_config()
        self.db_manager = DatabaseManager()
        self.paper_manager = PaperManager()
        
        # Optional services: remain None when their packages are unavailable
        # (see the module-level import guards) or initialization fails.
        self.scraper_service = None
        self.ai_engine = None
        self.recommendation_engine = None
        
        self._initialize_services()
    
    def _initialize_services(self) -> None:
        """Initialize scraper and AI services; failures are logged, never raised."""
        try:
            # Scraper is configured from its own YAML file, independent of
            # the main application config.
            if scraper_available:
                scraper_config_manager = ScraperConfigManager('arxiv_scraper_service/config.yaml')
                self.scraper_service = ArxivScraperService(scraper_config_manager.get_config())
                logger.info("ArXiv scraper service initialized")
            else:
                logger.warning("ArXiv scraper service not available")
            
            # Both AI engines share the same config dict and DB manager.
            if ai_available:
                ai_config = {
                    "database": self.config.database.to_dict(),
                    "embedding": {
                        "model_name": self.config.ai.model_name,
                        "use_gpu": self.config.ai.use_gpu,
                        "batch_size": self.config.ai.batch_size,
                        "cache_embeddings": self.config.ai.cache_embeddings
                    },
                    "learning": {
                        "time_decay_factor": self.config.ai.time_decay_factor,
                        "min_interactions": self.config.ai.min_interactions,
                        "interaction_weights": self.config.ai.interaction_weights
                    }
                }
                
                self.ai_engine = AIClassificationEngine(ai_config, self.db_manager)
                self.recommendation_engine = PersonalizedRecommendationEngine(ai_config, self.db_manager)
                logger.info("AI classification engine initialized")
            else:
                logger.warning("AI classification engine not available")
                
        except Exception as e:
            # A failure here leaves the affected service(s) as None; every
            # pipeline method checks for None before using them.
            logger.error(f"Service initialization failed: {e}")
    
    async def run_daily_pipeline(self) -> PipelineMetrics:
        """
        Run the complete daily processing pipeline.

        Returns:
            PipelineMetrics with per-stage counts, wall-clock duration, and
            any stage-level error messages.  Metrics are persisted in the
            ``finally`` block, so they are stored even on the early
            "no new papers" return or when a stage raises.
        """
        metrics = PipelineMetrics()
        start_time = datetime.now()
        
        logger.info("Starting daily data pipeline")
        
        try:
            # Step 1: Scrape new papers from ArXiv
            logger.info("Step 1: Scraping new papers from ArXiv")
            scraped_papers = await self._scrape_papers()
            metrics.papers_scraped = len(scraped_papers)
            
            if not scraped_papers:
                # Early exit: the finally block below still records duration
                # and stores the (mostly-zero) metrics row.
                logger.info("No new papers found")
                return metrics
            
            # Step 2: Store papers in database
            logger.info(f"Step 2: Storing {len(scraped_papers)} papers in database")
            stored_papers = await self._store_papers(scraped_papers)
            metrics.papers_stored = len(stored_papers)
            
            # Steps 3-4 run only when the AI engine exists AND the
            # recommendations feature flag is on.
            if self.ai_engine and self.config.features.enable_recommendations:
                logger.info(f"Step 3: Classifying {len(stored_papers)} papers with AI")
                classified_papers = await self._classify_papers(stored_papers)
                metrics.papers_classified = len(classified_papers)
                
                # Step 4: Update user recommendations
                logger.info("Step 4: Updating user recommendations")
                updated_users = await self._update_user_recommendations(classified_papers)
                metrics.users_updated = len(updated_users)
            
            # Step 5: Trigger email notifications (feature-gated)
            if self.config.features.enable_email_notifications:
                logger.info("Step 5: Triggering email notifications")
                await self._trigger_email_notifications()
            
        except Exception as e:
            # Record the failure on the metrics object rather than re-raising,
            # so a partial run still reports what it accomplished.
            error_msg = f"Pipeline execution failed: {e}"
            logger.error(error_msg)
            metrics.errors.append(error_msg)
        
        finally:
            end_time = datetime.now()
            metrics.processing_time_seconds = (end_time - start_time).total_seconds()
            
            logger.info(f"Daily pipeline completed in {metrics.processing_time_seconds:.2f} seconds")
            logger.info(f"Scraped: {metrics.papers_scraped}, Stored: {metrics.papers_stored}, "
                       f"Classified: {metrics.papers_classified}, Users updated: {metrics.users_updated}")
            
            # Store pipeline metrics
            await self._store_pipeline_metrics(metrics)
        
        return metrics
    
    async def _scrape_papers(self) -> List[Dict[str, Any]]:
        """Scrape new papers from ArXiv.

        Returns:
            List of raw paper dicts from the scraper, or [] when the scraper
            is unavailable, returns nothing, or raises.
        """
        if not self.scraper_service:
            logger.warning("Scraper service not available, skipping paper scraping")
            return []
        
        try:
            # Incremental scrape: only papers published since the last run.
            # NOTE(review): run_incremental_scrape is called synchronously
            # inside this coroutine — if it does network I/O it blocks the
            # event loop; confirm whether it should be awaited or offloaded.
            results = self.scraper_service.run_incremental_scrape()
            
            if 'papers' in results:
                return results['papers']
            else:
                logger.warning("No papers returned from scraper service")
                return []
                
        except Exception as e:
            logger.error(f"Paper scraping failed: {e}")
            return []
    
    async def _store_papers(self, papers: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Store papers in database with deduplication.

        Papers whose arxiv_id already exists are skipped; per-paper failures
        are logged and do not stop the batch.

        Returns:
            The subset of papers that were newly created, as returned by
            PaperManager.create_paper.
        """
        stored_papers = []
        
        try:
            # One connection is reused for the whole batch.
            async with self.db_manager.get_connection() as conn:
                for paper in papers:
                    try:
                        # Deduplicate on arxiv_id before inserting.
                        existing_paper = await self.paper_manager.get_paper_by_arxiv_id(
                            conn, paper.get('arxiv_id')
                        )
                        
                        if existing_paper:
                            logger.debug(f"Paper {paper.get('arxiv_id')} already exists, skipping")
                            continue
                        
                        # Normalize the scraper dict into the fields the paper
                        # manager expects; missing list/dict fields default empty.
                        stored_paper = await self.paper_manager.create_paper(conn, {
                            'arxiv_id': paper.get('arxiv_id'),
                            'title': paper.get('title'),
                            'authors': paper.get('authors', []),
                            'abstract': paper.get('abstract'),
                            'categories': paper.get('categories', []),
                            'published_date': paper.get('published_date'),
                            'updated_date': paper.get('updated_date'),
                            'pdf_url': paper.get('pdf_url'),
                            'doi': paper.get('doi'),
                            'journal_ref': paper.get('journal_ref'),
                            'primary_category': paper.get('primary_category'),
                            'metadata': paper.get('metadata', {})
                        })
                        
                        stored_papers.append(stored_paper)
                        
                    except Exception as e:
                        logger.error(f"Failed to store paper {paper.get('arxiv_id')}: {e}")
                        continue
        
        except Exception as e:
            logger.error(f"Paper storage failed: {e}")
        
        return stored_papers
    
    async def _classify_papers(self, papers: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Classify papers using AI engine.

        Each paper gets a 'classification' key added and the result persisted.
        Papers whose classification fails are still included in the result,
        just without the classification data persisted.

        Returns:
            The input papers (classified where possible); the papers unchanged
            when the AI engine is unavailable.
        """
        if not self.ai_engine:
            logger.warning("AI engine not available, skipping classification")
            return papers
        
        classified_papers = []
        
        try:
            # NOTE(review): the batch loop only chunks the iteration —
            # classify_paper is still invoked once per paper below; confirm
            # whether the engine exposes a true batch API.
            batch_size = self.config.ai.batch_size
            
            for i in range(0, len(papers), batch_size):
                batch = papers[i:i + batch_size]
                
                for paper in batch:
                    try:
                        # classify_paper is called synchronously inside this
                        # coroutine — presumably CPU-bound; confirm it does not
                        # block the event loop for long.
                        classification = self.ai_engine.classify_paper({
                            'title': paper.get('title'),
                            'abstract': paper.get('abstract'),
                            'arxiv_id': paper.get('arxiv_id'),
                            'categories': paper.get('categories', [])
                        })
                        
                        # Attach the result to the in-memory paper dict.
                        paper['classification'] = classification
                        
                        # Persist; assumes stored papers carry an 'id' key —
                        # a missing key raises and is caught just below.
                        await self._store_paper_classification(paper['id'], classification)
                        
                        classified_papers.append(paper)
                        
                    except Exception as e:
                        logger.error(f"Classification failed for paper {paper.get('arxiv_id')}: {e}")
                        classified_papers.append(paper)  # Include paper even if classification fails
                        continue
        
        except Exception as e:
            logger.error(f"Batch classification failed: {e}")
        
        return classified_papers
    
    async def _store_paper_classification(self, paper_id: str, classification: Dict[str, Any]):
        """Upsert a paper's classification row (keyed on paper_id).

        Classification facets are serialized to JSON; failures are logged
        and swallowed so one bad row cannot break the batch.
        """
        try:
            async with self.db_manager.get_connection() as conn:
                async with conn.cursor() as cur:
                    await cur.execute("""
                        INSERT INTO paper_classifications 
                        (paper_id, semantic_topics, technical_categories, methodologies, 
                         application_domains, confidence_scores, classification_metadata, created_at)
                        VALUES (%s, %s, %s, %s, %s, %s, %s, NOW())
                        ON CONFLICT (paper_id) DO UPDATE SET
                            semantic_topics = EXCLUDED.semantic_topics,
                            technical_categories = EXCLUDED.technical_categories,
                            methodologies = EXCLUDED.methodologies,
                            application_domains = EXCLUDED.application_domains,
                            confidence_scores = EXCLUDED.confidence_scores,
                            classification_metadata = EXCLUDED.classification_metadata,
                            updated_at = NOW()
                    """, (
                        paper_id,
                        json.dumps(classification.get('semantic_topics', [])),
                        json.dumps(classification.get('technical_categories', [])),
                        json.dumps(classification.get('methodologies', [])),
                        json.dumps(classification.get('application_domains', [])),
                        json.dumps(classification.get('confidence_scores', {})),
                        json.dumps(classification.get('metadata', {}))
                    ))
                    
        except Exception as e:
            logger.error(f"Failed to store classification for paper {paper_id}: {e}")
    
    async def _update_user_recommendations(self, papers: List[Dict[str, Any]]) -> List[str]:
        """Regenerate and store recommendations for all active users.

        NOTE(review): the ``papers`` argument is currently unused — the
        engine regenerates recommendations from its own state, not from this
        batch; confirm whether per-batch scoping was intended.

        Returns:
            The user_ids whose recommendations were successfully refreshed.
        """
        if not self.recommendation_engine:
            logger.warning("Recommendation engine not available, skipping recommendation updates")
            return []
        
        updated_users = []
        
        try:
            # Only users active within the last 30 days (see _get_active_users).
            active_users = await self._get_active_users()
            
            for user_id in active_users:
                try:
                    # Generate updated recommendations
                    recommendations = self.recommendation_engine.get_recommendations(
                        user_id, count=50  # Generate more recommendations for better caching
                    )
                    
                    # Store recommendations
                    await self._store_user_recommendations(user_id, recommendations)
                    updated_users.append(user_id)
                    
                except Exception as e:
                    # One user failing must not block the rest.
                    logger.error(f"Failed to update recommendations for user {user_id}: {e}")
                    continue
        
        except Exception as e:
            logger.error(f"User recommendation update failed: {e}")
        
        return updated_users
    
    async def _get_active_users(self) -> List[str]:
        """Return user_ids of users marked active who logged in within 30 days.

        Returns [] on any database error.
        """
        try:
            async with self.db_manager.get_connection() as conn:
                async with conn.cursor() as cur:
                    await cur.execute("""
                        SELECT user_id FROM user_profiles 
                        WHERE is_active = true 
                        AND last_login > NOW() - INTERVAL '30 days'
                    """)
                    
                    results = await cur.fetchall()
                    # Assumes the cursor yields mapping-style rows (e.g. a
                    # dict row factory) — confirm against DatabaseManager.
                    return [row['user_id'] for row in results]
                    
        except Exception as e:
            logger.error(f"Failed to get active users: {e}")
            return []
    
    async def _store_user_recommendations(self, user_id: str, recommendations: List[Dict[str, Any]]):
        """Replace a user's stored recommendations with a fresh ranked list.

        Ranks are assigned from list order (1-based).  NOTE(review): the
        delete and inserts are not wrapped in an explicit transaction —
        whether they commit atomically depends on the connection's
        autocommit settings; confirm against DatabaseManager.
        """
        try:
            async with self.db_manager.get_connection() as conn:
                async with conn.cursor() as cur:
                    # Clear existing recommendations
                    await cur.execute("""
                        DELETE FROM user_recommendations 
                        WHERE user_id = %s
                    """, (user_id,))
                    
                    # Insert new recommendations, one row per item, ranked by
                    # position in the list.
                    for i, rec in enumerate(recommendations):
                        await cur.execute("""
                            INSERT INTO user_recommendations 
                            (user_id, paper_id, rank, score, reasons, created_at)
                            VALUES (%s, %s, %s, %s, %s, NOW())
                        """, (
                            user_id,
                            rec.get('paper_id'),
                            i + 1,
                            rec.get('score', 0.0),
                            json.dumps(rec.get('reasons', []))
                        ))
                        
        except Exception as e:
            logger.error(f"Failed to store recommendations for user {user_id}: {e}")
    
    async def _trigger_email_notifications(self):
        """Trigger email notifications for users based on their preferences.

        Placeholder: currently only logs; the queue/email-service integration
        is not implemented yet.
        """
        try:
            # Add notification tasks to queue
            # This would integrate with the email service
            logger.info("Email notifications triggered (implementation pending)")
            pass
            
        except Exception as e:
            logger.error(f"Email notification trigger failed: {e}")
    
    async def _store_pipeline_metrics(self, metrics: PipelineMetrics):
        """Upsert today's pipeline metrics row (one row per execution_date).

        Re-running the pipeline on the same calendar day overwrites that
        day's row rather than appending.  Errors are logged and swallowed.
        """
        try:
            async with self.db_manager.get_connection() as conn:
                async with conn.cursor() as cur:
                    await cur.execute("""
                        INSERT INTO pipeline_metrics 
                        (execution_date, papers_scraped, papers_classified, papers_stored, 
                         users_updated, processing_time_seconds, errors, created_at)
                        VALUES (CURRENT_DATE, %s, %s, %s, %s, %s, %s, NOW())
                        ON CONFLICT (execution_date) DO UPDATE SET
                            papers_scraped = EXCLUDED.papers_scraped,
                            papers_classified = EXCLUDED.papers_classified,
                            papers_stored = EXCLUDED.papers_stored,
                            users_updated = EXCLUDED.users_updated,
                            processing_time_seconds = EXCLUDED.processing_time_seconds,
                            errors = EXCLUDED.errors,
                            updated_at = NOW()
                    """, (
                        metrics.papers_scraped,
                        metrics.papers_classified,
                        metrics.papers_stored,
                        metrics.users_updated,
                        metrics.processing_time_seconds,
                        json.dumps(metrics.errors)
                    ))
                    
        except Exception as e:
            logger.error(f"Failed to store pipeline metrics: {e}")
    
    async def process_single_paper(self, paper_data: Dict[str, Any]) -> Dict[str, Any]:
        """Process one paper through store + classify (for real-time ingestion).

        Args:
            paper_data: Raw paper dict in the same shape the scraper emits.

        Returns:
            {"success": True, "paper": <stored/classified paper>} on success,
            {"success": False, "error": <message>} otherwise.
        """
        try:
            # Reuse the batch path with a single-element list.
            stored_papers = await self._store_papers([paper_data])
            if not stored_papers:
                return {"success": False, "error": "Failed to store paper"}
            
            paper = stored_papers[0]
            
            # Classification is optional; skipped when no AI engine is loaded.
            if self.ai_engine:
                classified_papers = await self._classify_papers([paper])
                if classified_papers:
                    paper = classified_papers[0]
            
            return {"success": True, "paper": paper}
            
        except Exception as e:
            logger.error(f"Single paper processing failed: {e}")
            return {"success": False, "error": str(e)}
    
    async def get_pipeline_status(self) -> Dict[str, Any]:
        """Return service availability, latest execution metrics, and config flags.

        On any error, returns an all-False services map plus the error string.
        """
        try:
            async with self.db_manager.get_connection() as conn:
                async with conn.cursor() as cur:
                    # Most recent metrics row (one per execution_date).
                    await cur.execute("""
                        SELECT * FROM pipeline_metrics 
                        ORDER BY execution_date DESC 
                        LIMIT 1
                    """)
                    
                    latest_metrics = await cur.fetchone()
                    
                    # Service flags reflect what _initialize_services managed
                    # to construct; database is probed live.
                    status = {
                        "services": {
                            "scraper": self.scraper_service is not None,
                            "ai_classification": self.ai_engine is not None,
                            "recommendation": self.recommendation_engine is not None,
                            "database": await self.db_manager.check_connection()
                        },
                        "latest_execution": dict(latest_metrics) if latest_metrics else None,
                        "configuration": {
                            "scraper_enabled": scraper_available,
                            "ai_enabled": ai_available,
                            "recommendations_enabled": self.config.features.enable_recommendations,
                            "email_notifications_enabled": self.config.features.enable_email_notifications
                        }
                    }
                    
                    return status
                    
        except Exception as e:
            logger.error(f"Failed to get pipeline status: {e}")
            return {
                "services": {
                    "scraper": False,
                    "ai_classification": False,
                    "recommendation": False,
                    "database": False
                },
                "error": str(e)
            }