import logging
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
from pyspark.ml.linalg import Vectors, VectorUDT
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import (
    array,
    col,
    collect_list,
    explode,
    lit,
    monotonically_increasing_id,
    udf,
    var_pop,
)
from pyspark.sql.types import (
    ArrayType,
    FloatType,
    IntegerType,
    StructField,
    StructType,
)

from .spark_engine import SparkVectorEngine


class VectorOperations:
    """High-level vector operations for spatial analytics."""

    def __init__(self, spark_engine: SparkVectorEngine):
        """
        Set up the operations facade around an existing Spark engine.

        Args:
            spark_engine: Configured SparkVectorEngine whose session is reused
                for all DataFrame construction in this class.
        """
        self.logger = logging.getLogger(__name__)
        self.engine = spark_engine
        self.spark = spark_engine.spark

    def create_synthetic_vectors(self,
                                 n_samples: int,
                                 n_dimensions: int,
                                 distribution: str = "normal",
                                 cluster_centers: Optional[List[List[float]]] = None,
                                 noise_level: float = 0.1) -> DataFrame:
        """
        Create synthetic vector data for testing.

        Args:
            n_samples: Number of samples to generate
            n_dimensions: Vector dimensionality
            distribution: Distribution type ('normal', 'uniform', 'clustered')
            cluster_centers: Centers for clustered distribution (required when
                distribution == 'clustered')
            noise_level: Std-dev of the Gaussian noise around each center

        Returns:
            DataFrame with columns (id: int, features: ML dense vector)

        Raises:
            ValueError: If the distribution is unknown, or 'clustered' is
                requested without cluster_centers.
        """
        if distribution == "normal":
            data = np.random.normal(0, 1, (n_samples, n_dimensions))
        elif distribution == "uniform":
            data = np.random.uniform(-1, 1, (n_samples, n_dimensions))
        elif distribution == "clustered":
            # Fail fast with a precise message; previously a missing
            # cluster_centers fell through to the generic "unsupported
            # distribution" error, which was misleading.
            if not cluster_centers:
                raise ValueError(
                    "cluster_centers must be provided for the 'clustered' distribution")

            n_clusters = len(cluster_centers)
            samples_per_cluster = n_samples // n_clusters
            data_list = [
                np.random.normal(center, noise_level, (samples_per_cluster, n_dimensions))
                for center in cluster_centers
            ]

            # Integer division may leave a remainder; pad from the first center.
            remaining = n_samples - n_clusters * samples_per_cluster
            if remaining > 0:
                data_list.append(
                    np.random.normal(cluster_centers[0], noise_level,
                                     (remaining, n_dimensions)))

            data = np.vstack(data_list)
        else:
            raise ValueError(f"Unsupported distribution: {distribution}")

        # Build (id, features) rows; Vectors.dense wraps each numpy row as a
        # Spark ML dense vector.
        schema = StructType([
            StructField("id", IntegerType(), False),
            StructField("features", VectorUDT(), False)
        ])
        rows = [(i, Vectors.dense(vector)) for i, vector in enumerate(data)]

        df = self.spark.createDataFrame(rows, schema)
        # Lazy %-style args skip formatting when INFO logging is disabled.
        self.logger.info("Generated %d synthetic vectors with %d dimensions",
                         n_samples, n_dimensions)

        return df

    def vector_statistics(self, df: DataFrame, vector_col: str = "features") -> Dict[str, Any]:
        """
        Compute comprehensive statistics for vector dataset.

        Args:
            df: DataFrame with vector data
            vector_col: Name of vector column

        Returns:
            Dict with 'n_samples', 'vector_size', per-dimension
            mean/std/min/max under 'dimension_statistics', and L2-norm
            statistics under 'norm_statistics'; {"error": ...} when empty.
        """
        first_vector = df.select(vector_col).first()
        if first_vector is None:
            return {"error": "Empty dataset"}

        vector_size = len(first_vector[vector_col])

        stats = {
            "n_samples": df.count(),
            "vector_size": vector_size,
            "data_type": "spark_vector"
        }

        def extract_components(vector):
            return vector.toArray().tolist()

        def vector_norm(vector):
            return float(np.linalg.norm(vector.toArray()))

        extract_udf = udf(extract_components, ArrayType(FloatType()))
        norm_udf = udf(vector_norm, FloatType())

        # Flatten the vector into one float column per dimension plus the L2
        # norm in a single select (the original grew the plan with one
        # withColumn per dimension).
        components_df = df.withColumn("components", extract_udf(col(vector_col)))
        flat_df = components_df.select(
            *[col("components")[i].alias(f"dim_{i}") for i in range(vector_size)],
            norm_udf(col(vector_col)).alias("norm")
        )

        # One describe() pass over all columns instead of one Spark job per
        # dimension plus a separate job for the norms. describe() reports
        # values as strings keyed by the 'summary' column.
        summary = flat_df.describe().toPandas().set_index("summary")

        def _col_stats(name: str) -> Dict[str, float]:
            return {
                "mean": float(summary.loc["mean", name]),
                "std": float(summary.loc["stddev", name]),
                "min": float(summary.loc["min", name]),
                "max": float(summary.loc["max", name]),
            }

        stats["dimension_statistics"] = {
            i: _col_stats(f"dim_{i}") for i in range(vector_size)
        }
        stats["norm_statistics"] = _col_stats("norm")

        return stats

    def find_outliers(self,
                     df: DataFrame,
                     vector_col: str = "features",
                     method: str = "distance",
                     threshold: float = 2.0) -> DataFrame:
        """
        Find outliers in vector dataset.

        Args:
            df: DataFrame with vector data
            vector_col: Name of vector column
            method: Outlier detection method ('distance', 'density')
            threshold: Threshold for outlier detection

        Returns:
            DataFrame with outlier indicators

        Raises:
            ValueError: If the method name is not recognized.
        """
        # Dispatch table keeps the public entry point flat.
        detectors = {
            "distance": self._distance_based_outliers,
            "density": self._density_based_outliers,
        }
        detector = detectors.get(method)
        if detector is None:
            raise ValueError(f"Unsupported outlier detection method: {method}")
        return detector(df, vector_col, threshold)

    def _distance_based_outliers(self, df: DataFrame, vector_col: str, threshold: float) -> DataFrame:
        """
        Distance-based outlier detection.

        Scores each row by the z-score of its vector's L2 norm; rows with
        |z| > threshold are flagged.

        Args:
            df: DataFrame with vector data
            vector_col: Name of vector column
            threshold: |z-score| above which a row is an outlier

        Returns:
            Input DataFrame with 'norm', 'outlier_score' and 'is_outlier'
            (0/1 int) columns added.
        """
        def vector_norm(vector):
            return float(np.linalg.norm(vector.toArray()))

        norm_udf = udf(vector_norm, FloatType())
        df_with_norm = df.withColumn("norm", norm_udf(col(vector_col)))

        # Single pass: StatCounter yields mean and stdev together. The
        # original ran two separate RDD jobs, plus a full df.count() for an
        # unused neighbor count `k`.
        norm_stats = df_with_norm.select("norm").rdd.map(lambda x: x[0]).stats()
        mean_norm = float(norm_stats.mean())
        std_norm = float(norm_stats.stdev())
        # Guard the degenerate all-equal-norms case; dividing by zero would
        # silently produce null scores.
        if std_norm == 0:
            std_norm = 1.0

        df_with_outliers = df_with_norm.withColumn(
            "outlier_score",
            (col("norm") - lit(mean_norm)) / lit(std_norm)
        )

        df_with_outliers = df_with_outliers.withColumn(
            "is_outlier",
            (abs(col("outlier_score")) > lit(threshold)).cast("integer")
        )

        return df_with_outliers

    def _density_based_outliers(self, df: DataFrame, vector_col: str, threshold: float) -> DataFrame:
        """
        Density-based outlier detection (simplified LOF approximation).

        Uses 1 / (||v|| + eps) as a crude density proxy and flags rows whose
        density z-score magnitude exceeds ``threshold``. A true LOF would
        require a neighbor search.

        Args:
            df: DataFrame with vector data
            vector_col: Name of vector column
            threshold: |z-score| above which a row is an outlier

        Returns:
            Input DataFrame with 'density_score', 'density_z_score' and
            'is_outlier' (0/1 int) columns added.
        """
        def compute_density_score(vector):
            # Inverse magnitude as a density proxy; epsilon avoids 1/0.
            norm = float(np.linalg.norm(vector.toArray()))
            return 1.0 / (norm + 1e-6)

        density_udf = udf(compute_density_score, FloatType())
        df_with_density = df.withColumn("density_score", density_udf(col(vector_col)))

        # Aggregate on the executors instead of collect()-ing every score to
        # the driver. StatCounter's stdev() is the population standard
        # deviation, matching the original np.std.
        density_stats = df_with_density.select("density_score").rdd.map(lambda x: x[0]).stats()
        mean_density = float(density_stats.mean())
        std_density = float(density_stats.stdev())
        # All-equal scores would otherwise divide by zero -> null z-scores.
        if std_density == 0:
            std_density = 1.0

        df_with_outliers = df_with_density.withColumn(
            "density_z_score",
            (col("density_score") - lit(mean_density)) / lit(std_density)
        )

        df_with_outliers = df_with_outliers.withColumn(
            "is_outlier",
            (abs(col("density_z_score")) > lit(threshold)).cast("integer")
        )

        return df_with_outliers

    def vector_aggregation(self,
                          df: DataFrame,
                          group_col: str,
                          vector_col: str = "features",
                          operation: str = "mean") -> DataFrame:
        """
        Aggregate vectors by group.

        Args:
            df: DataFrame with vector data and grouping column
            group_col: Column to group by
            vector_col: Name of vector column
            operation: Aggregation operation ('mean', 'sum', 'max', 'min')

        Returns:
            DataFrame with one row per group and an
            'aggregated_{operation}_{vector_col}' vector column.

        Raises:
            ValueError: If the operation name is unknown. Raised on the
                driver before any Spark job runs.
        """
        # Validate eagerly: the original only raised inside the executor UDF,
        # which surfaces as an opaque Py4J error instead of a ValueError.
        reducers = {
            "mean": np.mean,
            "sum": np.sum,
            "max": np.max,
            "min": np.min,
        }
        if operation not in reducers:
            raise ValueError(f"Unsupported aggregation: {operation}")
        reduce_fn = reducers[operation]

        def aggregate_vectors(vectors):
            # Skip any null entries defensively before stacking.
            arrays = [v.toArray() for v in vectors if v is not None]
            if not arrays:
                return None
            return Vectors.dense(reduce_fn(np.vstack(arrays), axis=0))

        # NOTE: relies on collect_list from pyspark.sql.functions (added to
        # the module imports -- the original referenced it without importing).
        agg_udf = udf(aggregate_vectors, VectorUDT())
        result_df = df.groupBy(group_col).agg(
            agg_udf(collect_list(vector_col)).alias(f"aggregated_{operation}_{vector_col}")
        )

        return result_df

    def vector_similarity_search(self,
                                 df1: DataFrame,
                                 df2: DataFrame,
                                 vector_col: str = "features",
                                 metric: str = "cosine",
                                 threshold: Optional[float] = None,
                                 top_k: Optional[int] = None) -> DataFrame:
        """
        Find similar vectors between two datasets.

        Args:
            df1: First dataset
            df2: Second dataset
            vector_col: Name of vector column
            metric: Similarity metric passed through to the engine
            threshold: Keep only pairs with similarity >= threshold
            top_k: Return only the k most similar pairs

        Returns:
            DataFrame with columns (id1, id2, similarity).

        Note:
            IDs come from monotonically_increasing_id(), so they are unique
            per row but NOT contiguous and not stable across runs. The
            original referenced this function without importing it (NameError
            at runtime); it is now imported at module level.
        """
        # Tag each side so pairs can be identified after the cross join.
        df1_with_id = df1.withColumn("id1", monotonically_increasing_id())
        df2_with_id = df2.withColumn("id2", monotonically_increasing_id())

        # NOTE(review): assumes the engine adds a 'similarity' column to the
        # crossed DataFrame and tolerates the duplicated vector column name
        # produced by the cross join -- confirm against SparkVectorEngine.
        similarity_df = self.engine.compute_similarity_matrix(
            df1_with_id.crossJoin(df2_with_id),
            vector_col,
            metric
        )

        if threshold is not None:
            similarity_df = similarity_df.filter(col("similarity") >= threshold)

        if top_k is not None:
            similarity_df = similarity_df.orderBy(col("similarity").desc()).limit(top_k)

        return similarity_df.select(
            col("id1"),
            col("id2"),
            col("similarity")
        )

    def dimension_importance_analysis(self,
                                     df: DataFrame,
                                     target_col: Optional[str] = None,
                                     vector_col: str = "features") -> Dict[str, Any]:
        """
        Analyze importance of each vector dimension.

        Importance is each dimension's share of the total per-dimension
        (population) variance. ``target_col`` is accepted for interface
        compatibility but currently unused.

        Args:
            df: DataFrame with vector data
            target_col: Optional target column for supervised analysis (unused)
            vector_col: Name of vector column

        Returns:
            Dict with 'dimension_importance' (sorted descending by
            importance), 'total_variance' and 'n_dimensions', or
            {"error": ...} for an empty dataset.
        """
        def extract_components(vector):
            return vector.toArray().tolist()

        extract_udf = udf(extract_components, ArrayType(FloatType()))
        components_df = df.withColumn("components", extract_udf(col(vector_col)))

        first_components = components_df.select("components").first()
        if first_components is None:
            return {"error": "Empty dataset"}

        n_dimensions = len(first_components[0])

        # Compute every dimension's variance in ONE Spark job; the original
        # launched a separate RDD job per dimension. var_pop matches the
        # population-variance semantics of RDD.variance().
        variance_row = components_df.select([
            var_pop(col("components")[i]).alias(f"var_{i}")
            for i in range(n_dimensions)
        ]).first()
        dimension_variances = {
            i: float(variance_row[f"var_{i}"]) for i in range(n_dimensions)
        }

        # Guard the all-identical-vectors case: total variance 0 would make
        # the importance shares divide by zero.
        total_variance = sum(dimension_variances.values())
        denom = total_variance if total_variance > 0 else 1.0
        dimension_importance = {
            i: {"variance": var, "importance": var / denom}
            for i, var in dimension_variances.items()
        }

        sorted_importance = sorted(dimension_importance.items(),
                                 key=lambda x: x[1]["importance"], reverse=True)

        return {
            "dimension_importance": dict(sorted_importance),
            "total_variance": total_variance,
            "n_dimensions": n_dimensions
        }