from typing import Any, Dict, List, Optional, Union
import numpy as np
from pyspark.sql import SparkSession, DataFrame
from pyspark.ml.feature import VectorAssembler, PCA, StandardScaler
from pyspark.ml.clustering import KMeans, BisectingKMeans
from pyspark.ml.linalg import VectorUDT, Vectors
from pyspark.sql.functions import col, udf, pandas_udf, PandasUDFType
from pyspark.sql.types import ArrayType, FloatType, IntegerType, StructType
import logging


class SparkVectorEngine:
    """Spark-based vector computation engine for large-scale vector analytics."""

    def __init__(self,
                 app_name: str = "VectorAnalytics",
                 master_url: str = "local[*]",
                 config: Optional[Dict[str, str]] = None):
        """
        Set up the engine and start its underlying Spark session.

        Args:
            app_name: Name of the Spark application
            master_url: Spark master URL (e.g., "local[*]", "spark://master:7077")
            config: Additional Spark configuration
        """
        self.app_name = app_name
        self.master_url = master_url
        self.config = config if config else {}

        # Start the session right away so the engine is immediately usable.
        self.spark = self._create_spark_session()
        self.logger = logging.getLogger(__name__)

    def _create_spark_session(self) -> SparkSession:
        """Build a SparkSession configured for vector workloads."""
        builder = (SparkSession.builder
                   .appName(self.app_name)
                   .master(self.master_url))

        # User-supplied settings are applied first; the vector-oriented
        # defaults below are applied afterwards, exactly as before.
        for key, value in self.config.items():
            builder = builder.config(key, value)

        vector_defaults = {
            "spark.sql.execution.arrow.pyspark.enabled": "true",
            "spark.serializer": "org.apache.spark.serializer.KryoSerializer",
            "spark.kryoserializer.buffer.max": "512m",
            "spark.sql.legacy.allowNonEmptyLocationInCTE": "true",
            "spark.sql.execution.arrow.maxRecordsPerBatch": "10000",
        }
        for key, value in vector_defaults.items():
            builder = builder.config(key, value)

        return builder.getOrCreate()

    def load_vector_data(self,
                        file_path: str,
                        vector_columns: List[str],
                        format: str = "csv",
                        **kwargs) -> DataFrame:
        """
        Load vector data from file.

        Args:
            file_path: Path to data file
            vector_columns: List of columns that form vectors
            format: File format (csv, json, parquet, or a geospatial format:
                gdb, geopackage, shapefile)
            **kwargs: Additional read options; for CSV these may override the
                header/inferSchema defaults

        Returns:
            DataFrame with the source columns plus an assembled "features"
            vector column when vector_columns is non-empty

        Raises:
            ValueError: If the format is not supported.
        """
        fmt = format.lower()  # normalize once instead of once per branch

        if fmt == "csv":
            # Fix: merge defaults with caller options so a caller passing
            # header= or inferSchema= no longer hits a duplicate-keyword
            # TypeError.
            csv_options = {"header": True, "inferSchema": True, **kwargs}
            df = self.spark.read.csv(file_path, **csv_options)
        elif fmt == "json":
            df = self.spark.read.json(file_path, **kwargs)
        elif fmt == "parquet":
            df = self.spark.read.parquet(file_path, **kwargs)
        elif fmt in ("gdb", "geopackage", "shapefile"):
            # Geospatial formats are delegated to the geospatial engine.
            from .geospatial_engine import GeospatialEngine
            geo_engine = GeospatialEngine(self.spark)
            return self._load_geospatial_data(geo_engine, file_path, vector_columns, format, **kwargs)
        else:
            raise ValueError(f"Unsupported format: {format}")

        # Assemble the raw numeric columns into a single ML vector column.
        if vector_columns:
            assembler = VectorAssembler(
                inputCols=vector_columns,
                outputCol="features",
                handleInvalid="skip"  # rows with invalid/null values are dropped
            )
            df = assembler.transform(df)

        # NOTE: count() triggers a full Spark job just for this log line.
        self.logger.info(f"Loaded {df.count()} vectors from {file_path}")
        return df

    def compute_similarity_matrix(self,
                                 df: DataFrame,
                                 vector_col: str = "features",
                                 metric: str = "cosine",
                                 threshold: Optional[float] = None) -> DataFrame:
        """
        Compute pairwise similarity scores between all vectors in *df*.

        Args:
            df: DataFrame with vector data
            vector_col: Name of vector column
            metric: Similarity metric ('cosine', 'euclidean', 'manhattan')
            threshold: Minimum value kept in the "similarity" column

        Returns:
            DataFrame with columns "v1", "v2" and "similarity"

        Raises:
            ValueError: If the metric is not one of the supported names.

        Note:
            For 'euclidean' and 'manhattan' the "similarity" column actually
            holds a distance (lower is closer), yet the threshold filter still
            keeps values >= threshold; callers should account for that.
        """
        metric_factories = {
            "cosine": self._cosine_similarity_udf,
            "euclidean": self._euclidean_distance_udf,
            "manhattan": self._manhattan_distance_udf,
        }
        factory = metric_factories.get(metric)
        if factory is None:
            raise ValueError(f"Unsupported metric: {metric}")
        similarity_udf = factory()

        # Pair every vector with every other via a cross join (O(n^2) rows).
        left = df.select(col(vector_col).alias("v1"))
        right = df.select(col(vector_col).alias("v2"))
        scored = left.crossJoin(right).withColumn(
            "similarity",
            similarity_udf(col("v1"), col("v2"))
        )

        # Optionally keep only sufficiently similar pairs.
        if threshold is None:
            return scored
        return scored.filter(col("similarity") >= threshold)

    def kmeans_clustering(self,
                         df: DataFrame,
                         vector_col: str = "features",
                         k: int = 3,
                         max_iter: int = 100,
                         seed: int = 42) -> DataFrame:
        """
        Perform K-means clustering on vectors.

        Args:
            df: DataFrame with vector data
            vector_col: Name of vector column
            k: Number of clusters
            max_iter: Maximum iterations
            seed: Random seed

        Returns:
            DataFrame with a "cluster" assignment column and a
            "center_distance" column (Euclidean distance from each vector to
            its assigned cluster center).
        """
        kmeans = KMeans(
            featuresCol=vector_col,
            predictionCol="cluster",
            k=k,
            maxIter=max_iter,
            seed=seed
        )

        model = kmeans.fit(df)
        clustered_df = model.transform(df)

        # Broadcast the centers so executors don't re-serialize them per row.
        centers = model.clusterCenters()
        centers_broadcast = self.spark.sparkContext.broadcast(centers)

        # Fix: the UDF is invoked with two columns, so it must accept both the
        # feature vector and the cluster id. The original took a single arg,
        # indexed it with string keys, and read the broadcast through the
        # nonexistent `.values` attribute (Broadcast exposes `.value`).
        def get_center_distance(features, cluster):
            center = centers_broadcast.value[cluster]
            return float(np.linalg.norm(features.toArray() - center))

        get_center_distance_udf = udf(get_center_distance, FloatType())
        clustered_df = clustered_df.withColumn(
            "center_distance",
            # Fix: use vector_col rather than a hard-coded "features" column.
            get_center_distance_udf(col(vector_col), col("cluster"))
        )

        # Log clustering quality. KMeansSummary reliably exposes trainingCost;
        # a silhouette attribute is not part of the summary, so guard it
        # instead of raising AttributeError.
        summary = model.summary
        self.logger.info("K-means clustering completed:")
        self.logger.info(f"  - Number of clusters: {k}")
        silhouette = getattr(summary, "silhouette", None)
        if silhouette is not None:
            self.logger.info(f"  - Silhouette score: {silhouette:.3f}")
        self.logger.info(f"  - Within-cluster sum of squares: {summary.trainingCost:.3f}")

        return clustered_df

    def pca_dimensionality_reduction(self,
                                    df: DataFrame,
                                    vector_col: str = "features",
                                    k: int = 10) -> DataFrame:
        """
        Project vectors onto their top-k principal components.

        Args:
            df: DataFrame with vector data
            vector_col: Name of vector column
            k: Number of principal components

        Returns:
            DataFrame with an added "pca_features" column
        """
        model = PCA(featuresCol=vector_col, outputCol="pca_features", k=k).fit(df)
        projected = model.transform(df)

        # Report how much variance the retained components capture.
        variance_captured = sum(model.explainedVariance)
        self.logger.info(f"PCA dimensionality reduction to {k} components:")
        self.logger.info(f"  - Total explained variance: {variance_captured:.3f}")

        return projected

    def vector_arithmetic(self,
                         df: DataFrame,
                         operation: str,
                         operand: Union[float, np.ndarray, str],
                         vector_col: str = "features",
                         output_col: str = "result") -> DataFrame:
        """
        Perform element-wise arithmetic on a vector column.

        Args:
            df: DataFrame with vector data
            operation: Operation type ('add', 'subtract', 'multiply', 'divide')
            operand: Scalar, array (applied element-wise), or the name of
                another column in *df* to combine with row by row
            vector_col: Name of vector column
            output_col: Output column name

        Returns:
            DataFrame with the operation result (a dense vector) in
            *output_col*

        Raises:
            ValueError: If the operation is not supported.
        """
        # Fix: the original built `lit(...)` expressions (lit was never
        # imported -> NameError) and applied Spark's +,-,*,/ operators, which
        # are not defined for VectorUDT columns. The arithmetic is done in
        # numpy inside a UDF instead.
        operations = {
            "add": np.add,
            "subtract": np.subtract,
            "multiply": np.multiply,
            "divide": np.divide,
        }
        if operation not in operations:
            raise ValueError(f"Unsupported operation: {operation}")
        op_fn = operations[operation]

        def _as_array(value):
            # Accept ML vectors, numpy arrays, sequences, and scalars alike.
            return value.toArray() if hasattr(value, "toArray") else np.asarray(value)

        if isinstance(operand, str):
            # Column operand: combine the vector with the named column of the
            # same row. (The original cross-joined the frame with itself,
            # which exploded the row count instead of pairing within rows.)
            def apply_column_op(vec, other):
                if vec is None or other is None:
                    return None
                return Vectors.dense(op_fn(vec.toArray(), _as_array(other)))

            column_udf = udf(apply_column_op, VectorUDT())
            return df.withColumn(output_col, column_udf(col(vector_col), col(operand)))

        # Scalar or array operand: capture it once in the closure.
        operand_value = operand if isinstance(operand, (int, float)) else np.asarray(operand)

        def apply_const_op(vec):
            if vec is None:
                return None
            return Vectors.dense(op_fn(vec.toArray(), operand_value))

        const_udf = udf(apply_const_op, VectorUDT())
        return df.withColumn(output_col, const_udf(col(vector_col)))

    def nearest_neighbors_search(self,
                                 df: DataFrame,
                                 query_vector: List[float],
                                 vector_col: str = "features",
                                 k: int = 10,
                                 metric: str = "euclidean") -> DataFrame:
        """
        Find the k vectors closest to a query vector.

        Args:
            df: DataFrame with vector data
            query_vector: Query vector
            vector_col: Name of vector column
            k: Number of nearest neighbors
            metric: Distance metric ('euclidean' or 'cosine')

        Returns:
            DataFrame of the k rows with the smallest "distance"

        Raises:
            ValueError: If the metric is not supported.
        """
        query_vec = Vectors.dense(query_vector)

        udf_factories = {
            "euclidean": self._euclidean_distance_to_query_udf,
            "cosine": self._cosine_distance_to_query_udf,
        }
        if metric not in udf_factories:
            raise ValueError(f"Unsupported metric: {metric}")
        distance_udf = udf_factories[metric](query_vec)

        # Score every row against the query, then keep the k closest.
        scored = df.withColumn("distance", distance_udf(col(vector_col)))
        return scored.orderBy(col("distance").asc()).limit(k)

    def _cosine_similarity_udf(self):
        """Return a UDF computing cosine similarity between two vectors."""
        def cosine_similarity(left, right):
            # Nulls and zero-norm vectors are treated as having no similarity.
            if left is None or right is None:
                return 0.0
            a = left.toArray()
            b = right.toArray()
            denom = float(np.linalg.norm(a)) * float(np.linalg.norm(b))
            if denom == 0:
                return 0.0
            return float(np.dot(a, b)) / denom

        return udf(cosine_similarity, FloatType())

    def _euclidean_distance_udf(self):
        """Return a UDF computing Euclidean (L2) distance between two vectors."""
        def euclidean_distance(left, right):
            # Missing vectors are treated as infinitely far apart.
            if left is None or right is None:
                return float('inf')
            diff = left.toArray() - right.toArray()
            return float(np.sqrt(np.dot(diff, diff)))

        return udf(euclidean_distance, FloatType())

    def _manhattan_distance_udf(self):
        """Return a UDF computing Manhattan (L1) distance between two vectors."""
        def manhattan_distance(left, right):
            # Missing vectors are treated as infinitely far apart.
            if left is None or right is None:
                return float('inf')
            return float(np.abs(left.toArray() - right.toArray()).sum())

        return udf(manhattan_distance, FloatType())

    def _euclidean_distance_to_query_udf(self, query_vector):
        """
        Return a UDF computing the Euclidean distance from each row's vector
        to a fixed query vector.

        Args:
            query_vector: ML vector the distances are measured against.
        """
        # Hoist the conversion out of the closure so it is not repeated for
        # every row the UDF processes (it is loop-invariant).
        query_array = query_vector.toArray()

        def distance_to_query(v1):
            if v1 is None:
                return float('inf')
            return float(np.linalg.norm(v1.toArray() - query_array))

        return udf(distance_to_query, FloatType())

    def _cosine_distance_to_query_udf(self, query_vector):
        """
        Return a UDF computing cosine distance (1 - cosine similarity) from
        each row's vector to a fixed query vector.

        Args:
            query_vector: ML vector the distances are measured against.
        """
        # Hoist the query conversion and its norm out of the per-row closure;
        # both are loop-invariant and were recomputed on every row before.
        query_array = query_vector.toArray()
        query_norm = float(np.linalg.norm(query_array))

        def cosine_distance_to_query(v1):
            # Nulls and zero-norm vectors map to the maximum distance of 1.0.
            if v1 is None:
                return 1.0
            row_array = v1.toArray()
            row_norm = float(np.linalg.norm(row_array))
            if row_norm == 0 or query_norm == 0:
                return 1.0
            similarity = float(np.dot(row_array, query_array)) / (row_norm * query_norm)
            return 1.0 - similarity  # Convert similarity to distance

        return udf(cosine_distance_to_query, FloatType())

    def _load_geospatial_data(self,
                             geo_engine,
                             file_path: str,
                             vector_columns: List[str],
                             format: str,
                             layer_name: Optional[str] = None,
                             feature_type: str = "centroid",
                             **kwargs) -> DataFrame:
        """
        Load and process geospatial data.

        Reads the file through *geo_engine*, extracts per-feature vector
        columns, and converts the result into a Spark DataFrame.

        Args:
            geo_engine: GeospatialEngine instance
            file_path: Path to geospatial file
            vector_columns: Vector column names; when empty, defaults are
                derived from *feature_type* below
            format: File format
            layer_name: Specific layer name (for GDB)
            feature_type: Type of features to extract ("centroid",
                "geometry_coords", or any other value geo_engine supports)
            **kwargs: Additional options (not consumed in this method)

        Returns:
            Spark DataFrame with processed geospatial data

        Raises:
            ValueError: If no layers could be read from the source file.
        """
        try:
            # Read geospatial data
            if format.lower() == "gdb":
                layers_dict = geo_engine.read_gdb(file_path, layer_name=layer_name)
                if not layers_dict:
                    raise ValueError(f"No layers found in GDB: {file_path}")

                # Use first layer or specified layer
                if layer_name and layer_name in layers_dict:
                    gdf = layers_dict[layer_name]
                else:
                    # Dicts preserve insertion order, so this is whatever
                    # layer geo_engine returned first.
                    gdf = list(layers_dict.values())[0]
            else:
                # Read single file
                # NOTE(review): read_gdb is also used for non-GDB formats here
                # — presumably it dispatches on file extension; confirm in
                # GeospatialEngine.
                layers_dict = geo_engine.read_gdb(file_path)
                if not layers_dict:
                    raise ValueError(f"Failed to read geospatial file: {file_path}")
                gdf = list(layers_dict.values())[0]

            self.logger.info(f"Loaded {len(gdf)} features from geospatial data")

            # Extract vector features
            if feature_type == "centroid":
                df = geo_engine.extract_vector_features(gdf, "centroid")
                # Default vector columns for centroid features
                if not vector_columns:
                    vector_columns = ["centroid_x", "centroid_y"]
            elif feature_type == "geometry_coords":
                df = geo_engine.extract_vector_features(gdf, "geometry_coords")
                # Use coordinate columns as vector features
                if not vector_columns:
                    # Assumes the extractor names coordinate columns with a
                    # "coord_" prefix — TODO confirm in GeospatialEngine.
                    coord_cols = [col for col in df.columns if col.startswith("coord_")]
                    vector_columns = coord_cols
            else:
                # Any other feature type is passed through unchanged.
                df = geo_engine.extract_vector_features(gdf, feature_type)

            # Convert to Spark DataFrame
            spark_df = geo_engine.to_spark_dataframe(df, vector_columns)

            # NOTE: count() triggers a Spark job just for this log line.
            self.logger.info(f"Converted geospatial data to Spark DataFrame with {spark_df.count()} rows")
            return spark_df

        except Exception as e:
            # Log with context, then let the caller decide how to handle it.
            self.logger.error(f"Failed to load geospatial data: {str(e)}")
            raise

    def spatial_clustering(self,
                         df: DataFrame,
                         vector_col: str = "features",
                         k: int = 3,
                         max_iter: int = 100,
                         seed: int = 42,
                         include_spatial_metrics: bool = True) -> DataFrame:
        """
        Cluster spatial vectors with K-means, optionally adding spatial
        quality metrics on top of the plain clustering output.

        Args:
            df: DataFrame with spatial vector data
            vector_col: Name of vector column
            k: Number of clusters
            max_iter: Maximum iterations
            seed: Random seed
            include_spatial_metrics: Whether to include spatial quality metrics

        Returns:
            DataFrame with clustering results (plus spatial metrics when
            requested)
        """
        result = self.kmeans_clustering(df, vector_col, k, max_iter, seed)

        if not include_spatial_metrics:
            return result

        # Enrich the clustered frame with spatial quality measures.
        return self._add_spatial_clustering_metrics(result, vector_col)

    def _add_spatial_clustering_metrics(self, df: DataFrame, vector_col: str) -> DataFrame:
        """Attach a spatial clustering quality metric to *df*."""
        # Simplified "compactness": just the vector's norm. A faithful
        # intra-cluster distance would need per-cluster statistics.
        def _compactness(cluster_id, vec):
            return float(np.linalg.norm(vec.toArray()))

        compactness_udf = udf(_compactness, FloatType())
        return df.withColumn("compactness", compactness_udf(col("cluster"), col(vector_col)))

    def spatial_similarity_analysis(self,
                                  df: DataFrame,
                                  vector_col: str = "features",
                                  spatial_weight: float = 0.3,
                                  feature_weight: float = 0.7,
                                  threshold: Optional[float] = None) -> DataFrame:
        """
        Score all vector pairs by a weighted blend of feature similarity
        (cosine) and spatial similarity (inverse distance over the first two
        dimensions, which are assumed to hold x/y coordinates).

        Args:
            df: DataFrame with spatial vector data
            vector_col: Name of vector column
            spatial_weight: Weight for spatial similarity
            feature_weight: Weight for feature similarity
            threshold: Minimum combined similarity to keep

        Returns:
            DataFrame with "feature_similarity", "spatial_similarity" and
            "combined_similarity" columns
        """
        # Fix: the original also pre-built a full similarity matrix via
        # compute_similarity_matrix() and never used it; that dead plan has
        # been removed.

        def spatial_similarity(v1, v2):
            # Simplified spatial similarity based on the first two dimensions
            # (assumed coordinates) — TODO confirm against upstream data.
            if v1 is None or v2 is None:
                return 0.0
            v1_array = v1.toArray()
            v2_array = v2.toArray()
            if len(v1_array) < 2 or len(v2_array) < 2:
                return 0.0

            # Inverse distance maps [0, inf) distances to (0, 1] similarity.
            dist = np.linalg.norm(v1_array[:2] - v2_array[:2])
            return 1.0 / (1.0 + dist)

        spatial_sim_udf = udf(spatial_similarity, FloatType())

        # Pair every vector with every other vector (O(n^2) rows).
        df1 = df.select(col(vector_col).alias("v1"))
        df2 = df.select(col(vector_col).alias("v2"))
        pairwise_df = df1.crossJoin(df2)

        combined_df = pairwise_df.withColumn(
            "feature_similarity",
            self._cosine_similarity_udf()(col("v1"), col("v2"))
        ).withColumn(
            "spatial_similarity",
            spatial_sim_udf(col("v1"), col("v2"))
        ).withColumn(
            "combined_similarity",
            (col("feature_similarity") * feature_weight + col("spatial_similarity") * spatial_weight)
        )

        # Apply threshold if specified
        if threshold is not None:
            combined_df = combined_df.filter(col("combined_similarity") >= threshold)

        return combined_df

    def geospatial_statistics(self,
                            df: DataFrame,
                            vector_col: str = "features") -> Dict[str, Any]:
        """
        Compute geospatial-specific statistics.

        Combines the generic vector statistics with spatial correlation and
        autocorrelation measures.

        Args:
            df: DataFrame with spatial vector data
            vector_col: Name of vector column

        Returns:
            Dictionary with both basic and geospatial statistics
        """
        # Generic per-vector statistics come from the shared helper module.
        from .vector_operations import VectorOperations
        basic_stats = VectorOperations(self).vector_statistics(df, vector_col)

        # Layer the spatial measures on top of the basic ones.
        stats = dict(basic_stats)
        stats["spatial_correlation"] = self._compute_spatial_correlation(df, vector_col)
        stats["spatial_autocorrelation"] = self._compute_spatial_autocorrelation(df, vector_col)
        return stats

    def _compute_spatial_correlation(self, df: DataFrame, vector_col: str) -> Dict[str, float]:
        """Compute the correlation between the x and y coordinate dimensions.

        The first two entries of the vector column are assumed to be spatial
        coordinates. Returns 0.0 on any failure (best-effort metric).
        """
        try:
            # Pull the assumed coordinate dimensions to the driver.
            rows = df.select(
                col(vector_col).getItem(0).alias("x"),
                col(vector_col).getItem(1).alias("y")
            ).collect()

            xs = [row["x"] for row in rows]
            ys = [row["y"] for row in rows]

            if len(xs) <= 1:
                return {"xy_correlation": 0.0}

            # Pearson correlation of x against y.
            return {"xy_correlation": float(np.corrcoef(xs, ys)[0, 1])}

        except Exception as e:
            # Best-effort: degrade to a neutral value rather than failing.
            self.logger.warning(f"Failed to compute spatial correlation: {str(e)}")
            return {"xy_correlation": 0.0}

    def _compute_spatial_autocorrelation(self, df: DataFrame, vector_col: str) -> Dict[str, float]:
        """Compute a simplified spatial autocorrelation proxy.

        This is not a true Moran's I (which would require a spatial weights
        matrix); it reports the average Euclidean distance over all unordered
        vector pairs. Returns 0.0 on any failure (best-effort metric).
        """
        try:
            data = df.select(vector_col).collect()
            vectors = [row[vector_col].toArray() for row in data]

            if len(vectors) <= 1:
                return {"avg_neighbor_distance": 0.0}

            # Vectorized pairwise distances replace the original O(n^2)
            # Python double loop; the mean over the upper triangle equals the
            # original pair-by-pair average. Memory is O(n^2 * d), which is
            # acceptable for a driver-side metric on already-collected data.
            arr = np.asarray(vectors)
            diffs = arr[:, np.newaxis, :] - arr[np.newaxis, :, :]
            dists = np.sqrt((diffs ** 2).sum(axis=-1))
            i_upper, j_upper = np.triu_indices(len(arr), k=1)
            avg_distance = dists[i_upper, j_upper].mean()
            return {"avg_neighbor_distance": float(avg_distance)}

        except Exception as e:
            self.logger.warning(f"Failed to compute spatial autocorrelation: {str(e)}")
            return {"avg_neighbor_distance": 0.0}

    def stop(self):
        """Shut down the underlying Spark session, if one exists."""
        if not self.spark:
            return
        self.spark.stop()
        self.logger.info("Spark session stopped")

    def __enter__(self):
        """Enter the context manager; the engine itself is the context value."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exit the context manager, stopping the Spark session.

        Exceptions are not suppressed (implicitly returns None).
        """
        self.stop()