#!/usr/bin/env python3
"""
Script to generate sample vector data for testing the Vector Analytics Platform.
"""

import numpy as np
import pandas as pd
from sklearn.datasets import make_blobs, make_classification
import os
import json


def generate_kmeans_sample_data(n_samples=1000, n_features=10, n_clusters=3, random_state=42):
    """Generate sample data suitable for K-means clustering.

    Args:
        n_samples: Total number of points to generate.
        n_features: Dimensionality of each point.
        n_clusters: Number of Gaussian blobs (clusters) to create.
        random_state: Seed controlling both center placement and blob sampling.

    Returns:
        pd.DataFrame with columns ``feature_1..feature_<n_features>``,
        ``true_cluster`` (the generating blob index) and ``id`` (row number).
    """
    # Draw cluster centers from a locally seeded generator. The original used
    # the global, unseeded np.random, so results were NOT reproducible even
    # though a random_state parameter was accepted.
    rng = np.random.RandomState(random_state)
    centers = rng.randn(n_clusters, n_features) * 2

    X, y = make_blobs(
        n_samples=n_samples,
        n_features=n_features,
        centers=centers,
        cluster_std=0.8,
        random_state=random_state
    )

    # Create feature names
    feature_names = [f"feature_{i+1}" for i in range(n_features)]

    # Create DataFrame
    df = pd.DataFrame(X, columns=feature_names)
    df['true_cluster'] = y
    df['id'] = range(len(df))

    return df


def generate_classification_data(n_samples=1000, n_features=20, n_classes=2, random_state=42):
    """Generate sample data suitable for classification tasks.

    Args:
        n_samples: Number of rows to generate.
        n_features: Total feature count (informative + redundant + noise).
        n_classes: Number of target classes.
        random_state: Seed for reproducible output.

    Returns:
        pd.DataFrame with columns ``feature_1..feature_<n_features>``,
        ``label`` (the class) and ``id`` (row number).
    """
    # Clamp the informative/redundant counts so they always fit inside
    # n_features. The original max(5, ...) could exceed n_features for small
    # feature counts (e.g. n_features=4 -> n_informative=5), which makes
    # make_classification raise. Values are unchanged for the defaults and
    # for every call site in this file.
    n_informative = min(n_features, max(5, n_features // 4))
    n_redundant = min(n_features - n_informative, max(2, n_features // 8))

    X, y = make_classification(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=n_informative,
        n_redundant=n_redundant,
        n_classes=n_classes,
        random_state=random_state
    )

    # Create feature names
    feature_names = [f"feature_{i+1}" for i in range(n_features)]

    # Create DataFrame
    df = pd.DataFrame(X, columns=feature_names)
    df['label'] = y
    df['id'] = range(len(df))

    return df


def generate_outlier_data(n_samples=1000, n_features=8, outlier_fraction=0.1, random_state=42):
    """Generate data containing labeled outliers.

    Args:
        n_samples: Total number of rows to generate (normal + outliers).
        n_features: Dimensionality of each point.
        outlier_fraction: Fraction of rows drawn with inflated variance.
        random_state: Seed for reproducible output.

    Returns:
        pd.DataFrame with columns ``feature_1..feature_<n_features>``,
        ``is_outlier`` (0 normal / 1 outlier) and ``id`` (row number).
    """
    # Use a locally seeded generator: the original ignored random_state
    # entirely, so output was not reproducible.
    rng = np.random.RandomState(random_state)

    # Compute counts so they always sum exactly to n_samples. The original
    # truncated both terms with int(), which could drop a row (e.g.
    # n_samples=10, outlier_fraction=0.15 produced only 9 rows).
    n_outliers = int(round(n_samples * outlier_fraction))
    n_normal = n_samples - n_outliers

    # Normal points: standard normal. Outliers: same distribution scaled up,
    # i.e. much larger variance.
    normal_data = rng.randn(n_normal, n_features)
    outlier_data = rng.randn(n_outliers, n_features) * 5

    # Combine data and build labels (0 for normal, 1 for outliers)
    X = np.vstack([normal_data, outlier_data])
    y = np.hstack([
        np.zeros(n_normal),
        np.ones(n_outliers)
    ])

    # Shuffle so outliers are not grouped at the end
    indices = rng.permutation(len(X))
    X = X[indices]
    y = y[indices]

    # Create feature names
    feature_names = [f"feature_{i+1}" for i in range(n_features)]

    # Create DataFrame
    df = pd.DataFrame(X, columns=feature_names)
    df['is_outlier'] = y.astype(int)
    df['id'] = range(len(df))

    return df


def generate_high_dimensional_data(n_samples=500, n_features=100, random_state=42):
    """Generate high-dimensional data suitable for PCA.

    Args:
        n_samples: Number of rows to generate.
        n_features: Total dimensionality (informative block + noise block).
        random_state: Seed for reproducible output.

    Returns:
        pd.DataFrame with columns ``feature_1..feature_<n_features>`` and
        ``id`` (row number).
    """
    # Use a local RandomState instead of np.random.seed: seeding the global
    # RNG is a module-wide side effect that perturbs any other code using
    # np.random. RandomState(seed) yields the identical sample sequence, so
    # generated values are unchanged.
    rng = np.random.RandomState(random_state)

    # First block of "informative" dimensions, then pure-noise dimensions.
    # NOTE(review): both blocks are plain standard-normal draws, so there is
    # no actual low-rank structure here — confirm that is intended for the
    # PCA demo.
    n_informative = min(20, n_features // 5)
    X_informative = rng.randn(n_samples, n_informative)

    n_noise = n_features - n_informative
    X_noise = rng.randn(n_samples, n_noise)

    # Combine
    X = np.hstack([X_informative, X_noise])

    # Create feature names
    feature_names = [f"feature_{i+1}" for i in range(n_features)]

    # Create DataFrame
    df = pd.DataFrame(X, columns=feature_names)
    df['id'] = range(len(df))

    return df


def create_dataset_metadata(df, name, description, vector_columns):
    """Build a JSON-serializable metadata dictionary describing *df*.

    Args:
        df: The dataset to describe.
        name: Human-readable dataset name.
        description: Free-text description.
        vector_columns: Column names that make up the vector portion.

    Returns:
        dict with name, description, vector columns, shape, column names,
        per-column dtypes (as strings) and an ISO-8601 creation timestamp.
    """
    dtype_map = {column: str(dtype) for column, dtype in df.dtypes.items()}
    shape_info = {
        "n_samples": len(df),
        "n_dimensions": len(vector_columns),
    }
    return {
        "name": name,
        "description": description,
        "vector_columns": vector_columns,
        "shape": shape_info,
        "column_names": list(df.columns),
        "dtypes": dtype_map,
        "created_at": pd.Timestamp.now().isoformat(),
    }


def main():
    """Generate all sample datasets and write them (plus metadata) to disk."""
    # All output lands under a local "data/" directory.
    data_dir = "data"
    os.makedirs(data_dir, exist_ok=True)

    def _write_dataset(df, stem, title, description, n_features):
        # Persist the CSV and its companion metadata JSON side by side.
        df.to_csv(f"{data_dir}/{stem}.csv", index=False)
        metadata = create_dataset_metadata(
            df,
            title,
            description,
            [f"feature_{i+1}" for i in range(n_features)],
        )
        with open(f"{data_dir}/{stem}_metadata.json", 'w') as fh:
            json.dump(metadata, fh, indent=2)

    print("Generating K-means dataset...")
    _write_dataset(
        generate_kmeans_sample_data(n_samples=2000, n_features=15, n_clusters=5),
        "kmeans_sample",
        "K-means Sample Data",
        "Synthetic data generated for K-means clustering with 5 clusters",
        15,
    )

    print("Generating classification dataset...")
    _write_dataset(
        generate_classification_data(n_samples=3000, n_features=25, n_classes=3),
        "classification_sample",
        "Classification Sample Data",
        "Synthetic data generated for classification tasks with 3 classes",
        25,
    )

    print("Generating outlier dataset...")
    _write_dataset(
        generate_outlier_data(n_samples=1500, n_features=10, outlier_fraction=0.15),
        "outlier_sample",
        "Outlier Detection Sample",
        "Synthetic data with outliers for outlier detection algorithms",
        10,
    )

    print("Generating high-dimensional dataset...")
    _write_dataset(
        generate_high_dimensional_data(n_samples=800, n_features=150),
        "pca_sample",
        "High-Dimensional Sample Data",
        "High-dimensional synthetic data suitable for PCA dimensionality reduction",
        150,
    )

    print(f"\nAll sample datasets generated successfully in '{data_dir}/' directory!")
    print("\nDatasets created:")
    print("1. kmeans_sample.csv - K-means clustering data")
    print("2. classification_sample.csv - Classification data")
    print("3. outlier_sample.csv - Outlier detection data")
    print("4. pca_sample.csv - High-dimensional data for PCA")


if __name__ == "__main__":
    main()