from typing import List, Dict, Any, Optional, Union
import os
import json
import pickle
from pathlib import Path
from datetime import datetime
import uuid
import pandas as pd
import numpy as np
from pyspark.sql import DataFrame
from pyspark.sql.functions import col, lit
from pydantic import BaseModel
import redis
import logging

from ..core.spark_engine import SparkVectorEngine
from ..core.vector_operations import VectorOperations
from ..core.geospatial_engine import GeospatialEngine


class DatasetInfo(BaseModel):
    """Dataset information model.

    Persisted by VectorDataService as ``<id>_info.json`` in the data
    directory and cached in Redis; fields must stay JSON-serializable
    (``json.dump(..., default=str)`` is used for datetimes).
    """
    id: str  # UUID4 string assigned at upload time
    name: str  # human-readable dataset name
    description: Optional[str] = None
    file_path: str  # path of the file copied into the data directory
    vector_columns: List[str]  # source columns assembled into the "features" vector
    shape: Dict[str, int]  # {"n_samples": ..., "n_dimensions": ...}
    created_at: datetime
    metadata: Optional[Dict[str, Any]] = None  # e.g. {"format": "csv"}; richer for geospatial uploads


class JobInfo(BaseModel):
    """Job information model.

    Persisted by VectorDataService as ``<id>_job.json`` in the data
    directory and cached in Redis with a 24h TTL.
    """
    id: str  # UUID4 string assigned at submission time
    type: str  # job type, e.g. 'kmeans', 'pca', 'similarity'
    status: str  # 'pending', 'running', 'completed', 'failed'
    input_dataset_id: str  # ID of the DatasetInfo this job consumes
    parameters: Dict[str, Any]  # job-type-specific parameters
    result_path: Optional[str] = None  # path of the result JSON, set on success
    error_message: Optional[str] = None  # set when the job fails
    created_at: datetime
    completed_at: Optional[datetime] = None  # set when status becomes 'completed'
    metadata: Optional[Dict[str, Any]] = None  # e.g. {"name": ...} when a job name was given


class VectorDataService:
    """Service for managing vector datasets and computations."""

    def __init__(self,
                 data_dir: str = "./data",
                 redis_url: str = "redis://localhost:6379",
                 spark_config: Optional[Dict[str, str]] = None):
        """
        Initialize data service.

        Args:
            data_dir: Directory for data storage (created if missing,
                including intermediate directories).
            redis_url: Redis connection URL.
            spark_config: Spark configuration overrides.
        """
        # Create the logger first so it is available to every code path
        # below (the original used the root logger before self.logger existed).
        self.logger = logging.getLogger(__name__)

        self.data_dir = Path(data_dir)
        # parents=True so a nested data_dir (e.g. ./var/data) works on first run.
        self.data_dir.mkdir(parents=True, exist_ok=True)

        # Initialize Spark engine
        self.spark_engine = SparkVectorEngine(
            app_name="VectorAnalyticsService",
            config=spark_config
        )

        # Initialize vector operations
        self.vector_ops = VectorOperations(self.spark_engine)

        # Initialize geospatial engine
        self.geo_engine = GeospatialEngine(self.spark_engine.spark)

        # Redis is optional: it is used only for caching and job queueing,
        # so a connection failure degrades to file-based storage.
        try:
            self.redis_client = redis.from_url(redis_url)
            self.redis_client.ping()
            self.redis_available = True
        except Exception as e:
            self.logger.warning(f"Redis not available: {e}")
            self.redis_available = False

    def upload_dataset(self,
                      file_path: str,
                      name: str,
                      vector_columns: List[str],
                      description: Optional[str] = None,
                      file_format: str = "csv") -> DatasetInfo:
        """
        Upload and register a new dataset.

        Args:
            file_path: Path to data file
            name: Dataset name
            vector_columns: List of vector column names
            description: Dataset description
            file_format: File format (csv, json, parquet)

        Returns:
            DatasetInfo object
        """
        dataset_id = str(uuid.uuid4())

        # Copy file to data directory
        file_name = Path(file_path).name
        target_path = self.data_dir / f"{dataset_id}_{file_name}"

        if file_path != str(target_path):
            import shutil
            shutil.copy2(file_path, target_path)

        try:
            # Load data with Spark to get shape
            df = self.spark_engine.load_vector_data(
                str(target_path),
                vector_columns,
                format=file_format
            )

            count = df.count()
            vector_size = len(df.select("features").first()[0])

            shape = {"n_samples": count, "n_dimensions": vector_size}

            # Create dataset info
            dataset_info = DatasetInfo(
                id=dataset_id,
                name=name,
                description=description,
                file_path=str(target_path),
                vector_columns=vector_columns,
                shape=shape,
                created_at=datetime.now(),
                metadata={"format": file_format}
            )

            # Save dataset info
            info_path = self.data_dir / f"{dataset_id}_info.json"
            with open(info_path, 'w') as f:
                json.dump(dataset_info.dict(), f, default=str, indent=2)

            # Cache dataset info in Redis
            if self.redis_available:
                self.redis_client.setex(
                    f"dataset:{dataset_id}",
                    3600,  # 1 hour TTL
                    json.dumps(dataset_info.dict(), default=str)
                )

            self.logger.info(f"Uploaded dataset: {name} (ID: {dataset_id})")
            return dataset_info

        except Exception as e:
            # Clean up on failure
            if target_path.exists():
                target_path.unlink()
            raise Exception(f"Failed to upload dataset: {str(e)}")

    def load_dataset(self, dataset_id: str) -> DataFrame:
        """
        Load dataset by ID.

        Args:
            dataset_id: Dataset ID

        Returns:
            Spark DataFrame
        """
        # Try to get from cache first
        if self.redis_available:
            cached_info = self.redis_client.get(f"dataset:{dataset_id}")
            if cached_info:
                dataset_info = DatasetInfo(**json.loads(cached_info))
            else:
                dataset_info = self._load_dataset_info(dataset_id)
        else:
            dataset_info = self._load_dataset_info(dataset_id)

        if not dataset_info:
            raise ValueError(f"Dataset not found: {dataset_id}")

        return self.spark_engine.load_vector_data(
            dataset_info.file_path,
            dataset_info.vector_columns,
            format=dataset_info.metadata.get("format", "csv")
        )

    def list_datasets(self) -> List[DatasetInfo]:
        """
        List all available datasets.

        Returns:
            List of DatasetInfo objects
        """
        datasets = []

        for info_file in self.data_dir.glob("*_info.json"):
            try:
                with open(info_file, 'r') as f:
                    dataset_dict = json.load(f)
                dataset_info = DatasetInfo(**dataset_dict)
                datasets.append(dataset_info)
            except Exception as e:
                self.logger.warning(f"Failed to load dataset info from {info_file}: {e}")

        return datasets

    def submit_job(self,
                  job_type: str,
                  input_dataset_id: str,
                  parameters: Dict[str, Any],
                  name: Optional[str] = None) -> JobInfo:
        """
        Submit a computation job.

        The job record is written to ``<id>_job.json`` on disk, cached in
        Redis (24 hour TTL) when available, and pushed onto the worker queue.

        Args:
            job_type: Type of job (e.g., 'kmeans', 'pca', 'similarity')
            input_dataset_id: Input dataset ID
            parameters: Job parameters
            name: Optional job name

        Returns:
            JobInfo object in 'pending' status
        """
        new_id = str(uuid.uuid4())

        record = JobInfo(
            id=new_id,
            type=job_type,
            status="pending",
            input_dataset_id=input_dataset_id,
            parameters=parameters,
            created_at=datetime.now(),
            metadata={"name": name} if name else None
        )

        # Persist to disk (source of truth).
        with open(self.data_dir / f"{new_id}_job.json", 'w') as fh:
            json.dump(record.dict(), fh, default=str, indent=2)

        # Cache in Redis for fast status lookups (24 hour TTL).
        if self.redis_available:
            self.redis_client.setex(
                f"job:{new_id}",
                86400,
                json.dumps(record.dict(), default=str)
            )

        # Hand the job to the asynchronous worker queue.
        self._queue_job(new_id)

        self.logger.info(f"Submitted job: {job_type} (ID: {new_id})")
        return record

    def get_job_status(self, job_id: str) -> Optional[JobInfo]:
        """
        Get job status.

        Args:
            job_id: Job ID

        Returns:
            JobInfo object or None
        """
        # Try Redis cache first
        if self.redis_available:
            cached_job = self.redis_client.get(f"job:{job_id}")
            if cached_job:
                job_dict = json.loads(cached_job)
                return JobInfo(**job_dict)

        # Load from file
        job_info_path = self.data_dir / f"{job_id}_job.json"
        if job_info_path.exists():
            with open(job_info_path, 'r') as f:
                job_dict = json.load(f)
            return JobInfo(**job_dict)

        return None

    def execute_job(self, job_id: str) -> Dict[str, Any]:
        """
        Execute a job synchronously.

        Args:
            job_id: Job ID

        Returns:
            Execution results
        """
        job_info = self.get_job_status(job_id)
        if not job_info:
            raise ValueError(f"Job not found: {job_id}")

        if job_info.status != "pending":
            raise ValueError(f"Job not in pending status: {job_id}")

        try:
            # Update job status
            self._update_job_status(job_id, "running")

            # Load input data
            df = self.load_dataset(job_info.input_dataset_id)

            # Execute based on job type
            if job_info.type == "kmeans":
                result = self._execute_kmeans(df, job_info.parameters)
            elif job_info.type == "pca":
                result = self._execute_pca(df, job_info.parameters)
            elif job_info.type == "similarity":
                result = self._execute_similarity(df, job_info.parameters)
            elif job_info.type == "outlier_detection":
                result = self._execute_outlier_detection(df, job_info.parameters)
            elif job_info.type == "statistics":
                result = self._execute_statistics(df, job_info.parameters)
            elif job_info.type == "nearest_neighbors":
                result = self._execute_nearest_neighbors(df, job_info.parameters)
            else:
                raise ValueError(f"Unsupported job type: {job_info.type}")

            # Save result
            result_path = self._save_job_result(job_id, result)

            # Update job status
            self._update_job_status(
                job_id,
                "completed",
                result_path=result_path
            )

            return result

        except Exception as e:
            error_msg = str(e)
            self._update_job_status(job_id, "failed", error_message=error_msg)
            raise Exception(f"Job execution failed: {error_msg}")

    def _execute_kmeans(self, df: DataFrame, params: Dict[str, Any]) -> Dict[str, Any]:
        """Run K-means clustering on *df* and summarize the clusters."""
        k = params.get("k", 3)
        max_iter = params.get("max_iter", 100)
        seed = params.get("seed", 42)

        clustered = self.vector_ops.engine.kmeans_clustering(
            df, k=k, max_iter=max_iter, seed=seed
        )

        # Per-cluster sizes for the summary payload.
        counts = clustered.groupBy("cluster").count().toPandas()
        # Placeholder: an actual silhouette score is not computed yet.
        silhouette_score = 0.0

        return {
            "type": "kmeans",
            "parameters": {"k": k, "max_iter": max_iter, "seed": seed},
            "result": {
                "cluster_counts": counts.to_dict("records"),
                "silhouette_score": silhouette_score,
                "n_clusters": k,
            },
            "data_sample": clustered.limit(10).toPandas().to_dict("records"),
        }

    def _execute_pca(self, df: DataFrame, params: Dict[str, Any]) -> Dict[str, Any]:
        """Run PCA dimensionality reduction and summarize the projection."""
        n_components = params.get("k", 10)

        projected = self.vector_ops.engine.pca_dimensionality_reduction(df, k=n_components)

        # NOTE: explained_variance is a placeholder; real values would have
        # to be extracted from the fitted PCA model.
        return {
            "type": "pca",
            "parameters": {"k": n_components},
            "result": {
                "n_components": n_components,
                "explained_variance": [0.1] * n_components,
                "original_dimensions": len(df.select("features").first()[0]),
            },
            "data_sample": projected.limit(10).toPandas().to_dict("records"),
        }

    def _execute_similarity(self, df: DataFrame, params: Dict[str, Any]) -> Dict[str, Any]:
        """Compute a (sampled) pairwise similarity matrix and summarize it."""
        metric = params.get("metric", "cosine")
        threshold = params.get("threshold", None)
        sample_size = params.get("sample_size", 1000)

        # The full pairwise matrix grows quadratically, so work on a
        # bounded sample of the input.
        pairs = self.vector_ops.engine.compute_similarity_matrix(
            df.limit(sample_size),
            metric=metric,
            threshold=threshold
        )

        return {
            "type": "similarity",
            "parameters": {"metric": metric, "threshold": threshold, "sample_size": sample_size},
            "result": {
                "n_pairs": pairs.count(),
                "metric": metric,
            },
            "data_sample": pairs.limit(10).toPandas().to_dict("records"),
        }

    def _execute_outlier_detection(self, df: DataFrame, params: Dict[str, Any]) -> Dict[str, Any]:
        """Flag outlier vectors and report counts per is_outlier flag."""
        method = params.get("method", "distance")
        threshold = params.get("threshold", 2.0)

        flagged = self.vector_ops.find_outliers(df, method=method, threshold=threshold)

        # Outlier vs. inlier counts for the summary payload.
        breakdown = flagged.groupBy("is_outlier").count().toPandas()

        return {
            "type": "outlier_detection",
            "parameters": {"method": method, "threshold": threshold},
            "result": {
                "outlier_counts": breakdown.to_dict("records"),
                "method": method,
            },
            "data_sample": flagged.limit(10).toPandas().to_dict("records"),
        }

    def _execute_statistics(self, df: DataFrame, params: Dict[str, Any]) -> Dict[str, Any]:
        """Execute statistical analysis."""
        stats = self.vector_ops.vector_statistics(df)

        return {
            "type": "statistics",
            "parameters": {},
            "result": stats
        }

    def _execute_nearest_neighbors(self, df: DataFrame, params: Dict[str, Any]) -> Dict[str, Any]:
        """Execute nearest neighbors search."""
        query_vector = params.get("query_vector")
        k = params.get("k", 10)
        metric = params.get("metric", "euclidean")

        if not query_vector:
            raise ValueError("query_vector is required for nearest neighbors search")

        nn_df = self.vector_ops.engine.nearest_neighbors_search(
            df,
            query_vector,
            k=k,
            metric=metric
        )

        return {
            "type": "nearest_neighbors",
            "parameters": {"query_vector": query_vector, "k": k, "metric": metric},
            "result": {
                "k": k,
                "metric": metric
            },
            "data_sample": nn_df.toPandas().to_dict("records")
        }

    def _load_dataset_info(self, dataset_id: str) -> Optional[DatasetInfo]:
        """Load dataset info from file."""
        info_path = self.data_dir / f"{dataset_id}_info.json"
        if info_path.exists():
            with open(info_path, 'r') as f:
                dataset_dict = json.load(f)
            return DatasetInfo(**dataset_dict)
        return None

    def _update_job_status(self,
                          job_id: str,
                          status: str,
                          result_path: Optional[str] = None,
                          error_message: Optional[str] = None):
        """Update job status."""
        job_info = self.get_job_status(job_id)
        if job_info:
            job_info.status = status
            if result_path:
                job_info.result_path = result_path
            if error_message:
                job_info.error_message = error_message
            if status == "completed":
                job_info.completed_at = datetime.now()

            # Save updated job info
            job_info_path = self.data_dir / f"{job_id}_job.json"
            with open(job_info_path, 'w') as f:
                json.dump(job_info.dict(), f, default=str, indent=2)

            # Update Redis cache
            if self.redis_available:
                self.redis_client.setex(
                    f"job:{job_id}",
                    86400,
                    json.dumps(job_info.dict(), default=str)
                )

    def _save_job_result(self, job_id: str, result: Dict[str, Any]) -> str:
        """Save job result to file."""
        result_path = self.data_dir / f"{job_id}_result.json"
        with open(result_path, 'w') as f:
            json.dump(result, f, default=str, indent=2)
        return str(result_path)

    def _queue_job(self, job_id: str):
        """Queue job for asynchronous processing."""
        if self.redis_available:
            self.redis_client.lpush("job_queue", job_id)

    def upload_geospatial_dataset(self,
                                file_path: str,
                                name: str,
                                layer_name: Optional[str] = None,
                                feature_type: str = "centroid",
                                vector_columns: Optional[List[str]] = None,
                                description: Optional[str] = None,
                                file_format: str = "gdb") -> DatasetInfo:
        """
        Upload and register a geospatial dataset.

        Args:
            file_path: Path to geospatial file (GDB, GeoPackage, Shapefile)
            name: Dataset name
            layer_name: Specific layer name (for GDB)
            feature_type: Type of features to extract
            vector_columns: List of vector column names (auto-detected if None)
            description: Dataset description
            file_format: File format

        Returns:
            DatasetInfo object
        """
        dataset_id = str(uuid.uuid4())

        # Copy file to data directory
        file_name = Path(file_path).name
        target_path = self.data_dir / f"{dataset_id}_{file_name}"

        if file_path != str(target_path):
            import shutil
            if Path(file_path).is_dir():
                shutil.copytree(file_path, target_path, dirs_exist_ok=True)
            else:
                shutil.copy2(file_path, target_path)

        try:
            # Get geospatial metadata first
            geo_metadata = self.geo_engine.get_geospatial_metadata(str(target_path))

            # Load and process geospatial data
            if file_format.lower() in ["gdb", "geopackage", "shapefile"]:
                # Handle through geospatial engine
                layers_dict = self.geo_engine.read_gdb(
                    str(target_path),
                    layer_name=layer_name
                )

                if not layers_dict:
                    raise ValueError(f"No layers found in geospatial file: {file_path}")

                # Use specified layer or first layer
                if layer_name and layer_name in layers_dict:
                    gdf = layers_dict[layer_name]
                else:
                    gdf = list(layers_dict.values())[0]

                # Extract vector features
                df = self.geo_engine.extract_vector_features(gdf, feature_type)

                # Auto-detect vector columns if not specified
                if not vector_columns:
                    vector_columns = self._detect_vector_columns(df, feature_type)

                # Convert to Spark DataFrame
                spark_df = self.geo_engine.to_spark_dataframe(df, vector_columns)

            else:
                # Use standard Spark loader
                spark_df = self.spark_engine.load_vector_data(
                    str(target_path),
                    vector_columns or [],
                    format=file_format
                )

            count = spark_df.count()
            vector_size = len(spark_df.select("features").first()[0])

            shape = {"n_samples": count, "n_dimensions": vector_size}

            # Create enhanced metadata for geospatial data
            enhanced_metadata = {
                "format": file_format,
                "geospatial": True,
                "feature_type": feature_type,
                "layer_name": layer_name,
                "geo_metadata": geo_metadata
            }

            # Create dataset info
            dataset_info = DatasetInfo(
                id=dataset_id,
                name=name,
                description=description,
                file_path=str(target_path),
                vector_columns=vector_columns,
                shape=shape,
                created_at=datetime.now(),
                metadata=enhanced_metadata
            )

            # Save dataset info
            info_path = self.data_dir / f"{dataset_id}_info.json"
            with open(info_path, 'w') as f:
                json.dump(dataset_info.dict(), f, default=str, indent=2)

            # Cache dataset info in Redis
            if self.redis_available:
                self.redis_client.setex(
                    f"dataset:{dataset_id}",
                    3600,  # 1 hour TTL
                    json.dumps(dataset_info.dict(), default=str)
                )

            self.logger.info(f"Uploaded geospatial dataset: {name} (ID: {dataset_id})")
            return dataset_info

        except Exception as e:
            # Clean up on failure
            if target_path.exists():
                if target_path.is_dir():
                    shutil.rmtree(target_path)
                else:
                    target_path.unlink()
            raise Exception(f"Failed to upload geospatial dataset: {str(e)}")

    def get_geospatial_metadata(self, file_path: str) -> Dict[str, Any]:
        """
        Get metadata for geospatial file.

        Args:
            file_path: Path to geospatial file

        Returns:
            Dictionary with metadata
        """
        try:
            return self.geo_engine.get_geospatial_metadata(file_path)
        except Exception as e:
            self.logger.error(f"Failed to get geospatial metadata: {str(e)}")
            return {"error": str(e)}

    def _detect_vector_columns(self, df, feature_type: str) -> List[str]:
        """Auto-detect vector columns based on feature type."""
        if feature_type == "centroid":
            return ["centroid_x", "centroid_y"]
        elif feature_type == "geometry_coords":
            return [col for col in df.columns if col.startswith("coord_")]
        elif feature_type == "bounds":
            return ["min_x", "min_y", "max_x", "max_y"]
        elif feature_type == "area_perimeter":
            return ["area", "perimeter"]
        elif feature_type == "vertices":
            return ["vertex_count"] + [col for col in df.columns if col.startswith("vertex")]
        else:
            # Fallback to numeric columns
            return [col for col in df.columns if df[col].dtype in ['float64', 'int64']]

    def execute_spatial_clustering(self, dataset_id: str, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute spatial clustering job.

        Args:
            dataset_id: Dataset ID
            params: Clustering parameters ('k', 'max_iter', 'seed')

        Returns:
            Clustering results
        """
        df = self.load_dataset(dataset_id)

        k = params.get("k", 3)
        max_iter = params.get("max_iter", 100)
        seed = params.get("seed", 42)

        clustered = self.spark_engine.spatial_clustering(
            df,
            k=k,
            max_iter=max_iter,
            seed=seed,
            include_spatial_metrics=True
        )

        # Per-cluster sizes for the summary payload.
        counts = clustered.groupBy("cluster").count().toPandas()

        return {
            "type": "spatial_clustering",
            "parameters": {"k": k, "max_iter": max_iter, "seed": seed},
            "result": {
                "cluster_counts": counts.to_dict("records"),
                "n_clusters": k,
                "spatial_metrics": True,
            },
            "data_sample": clustered.limit(10).toPandas().to_dict("records"),
        }

    def execute_spatial_similarity(self, dataset_id: str, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute spatial similarity analysis.

        Args:
            dataset_id: Dataset ID
            params: 'spatial_weight', 'feature_weight' and optional 'threshold'

        Returns:
            Similarity results
        """
        df = self.load_dataset(dataset_id)

        spatial_weight = params.get("spatial_weight", 0.3)
        feature_weight = params.get("feature_weight", 0.7)
        threshold = params.get("threshold", None)

        pairs = self.spark_engine.spatial_similarity_analysis(
            df,
            spatial_weight=spatial_weight,
            feature_weight=feature_weight,
            threshold=threshold
        )

        return {
            "type": "spatial_similarity",
            "parameters": {
                "spatial_weight": spatial_weight,
                "feature_weight": feature_weight,
                "threshold": threshold,
            },
            "result": {
                "n_pairs": pairs.count(),
                "spatial_weight": spatial_weight,
                "feature_weight": feature_weight,
            },
            "data_sample": pairs.limit(10).toPandas().to_dict("records"),
        }

    def execute_geospatial_statistics(self, dataset_id: str) -> Dict[str, Any]:
        """
        Execute geospatial statistical analysis.

        Args:
            dataset_id: Dataset ID

        Returns:
            Geospatial statistics
        """
        df = self.load_dataset(dataset_id)
        stats = self.spark_engine.geospatial_statistics(df)

        return {
            "type": "geospatial_statistics",
            "parameters": {},
            "result": stats
        }

    def cleanup(self):
        """Clean up resources."""
        if self.spark_engine:
            self.spark_engine.stop()
        if self.redis_available:
            self.redis_client.close()