from fastapi import FastAPI, HTTPException, UploadFile, File, BackgroundTasks, Depends
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from typing import List, Dict, Any, Optional
import os
import tempfile
import logging
from datetime import datetime

from ..services.data_service import VectorDataService, DatasetInfo, JobInfo
from ..core.spark_engine import SparkVectorEngine

# Initialize logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Initialize FastAPI app
app = FastAPI(
    title="Vector Analytics API",
    description="REST API for vector spatial analytics with Spark",
    version="1.0.0"
)

# CORS middleware
# NOTE(review): wildcard allow_origins together with allow_credentials=True
# is not honored by browsers for credentialed requests — confirm the
# intended origin list before deploying.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure based on your security requirements
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global service instance; stays None until get_data_service() first runs.
data_service: Optional[VectorDataService] = None


def get_data_service() -> VectorDataService:
    """Get or lazily create the shared :class:`VectorDataService` singleton.

    Configuration is read from the environment on first use:
    ``DATA_DIR``, ``REDIS_URL``, ``SPARK_MASTER_URL``, and (new, with
    backward-compatible defaults) ``SPARK_DRIVER_MEMORY`` /
    ``SPARK_EXECUTOR_MEMORY``.

    Returns:
        The process-wide VectorDataService instance.

    Raises:
        HTTPException: 503 when the service cannot be initialized
            (e.g. Spark or Redis is unreachable).
    """
    global data_service
    if data_service is None:
        try:
            data_dir = os.getenv("DATA_DIR", "./data")
            redis_url = os.getenv("REDIS_URL", "redis://redis:6379")
            spark_master_url = os.getenv("SPARK_MASTER_URL", "local[*]")

            # Memory settings were previously hard-coded to "2g"; keep that
            # as the default but allow overriding via environment.
            spark_config = {
                "spark.master": spark_master_url,
                "spark.driver.memory": os.getenv("SPARK_DRIVER_MEMORY", "2g"),
                "spark.executor.memory": os.getenv("SPARK_EXECUTOR_MEMORY", "2g"),
            }

            logger.info("Initializing VectorDataService with Spark...")
            data_service = VectorDataService(
                data_dir=data_dir,
                redis_url=redis_url,
                spark_config=spark_config
            )
            logger.info("VectorDataService initialized successfully")
        except Exception as e:
            logger.error(f"Failed to initialize VectorDataService: {str(e)}")
            # Chain the cause so the original failure is preserved in logs.
            raise HTTPException(status_code=503, detail=f"Service initialization failed: {str(e)}") from e
    return data_service


def get_data_service_safe() -> Optional[VectorDataService]:
    """Best-effort variant of :func:`get_data_service`.

    Returns the service instance, or ``None`` (after logging a warning)
    if initialization fails — so health checks never raise.
    """
    try:
        return get_data_service()
    except Exception as e:
        logger.warning(f"Data service not available: {str(e)}")
        return None


# Pydantic models for API
class DatasetUploadRequest(BaseModel):
    """Metadata accompanying a tabular dataset upload."""
    name: str  # display name for the dataset
    vector_columns: List[str]  # columns that form the feature vector (per data service)
    description: Optional[str] = None
    file_format: str = "csv"  # passed through to the service's loader


class JobSubmissionRequest(BaseModel):
    """Generic payload for submitting a computation job."""
    job_type: str  # e.g. "kmeans", "pca", "similarity" (see endpoints below)
    input_dataset_id: str  # id of a previously uploaded dataset
    parameters: Dict[str, Any]  # job-type-specific parameters
    name: Optional[str] = None  # optional human-readable job name


class KMeansParams(BaseModel):
    """Tunables for a K-means clustering job."""
    k: int = 3  # number of clusters
    max_iter: int = 100  # iteration cap
    seed: int = 42  # RNG seed for reproducible runs


class PCAParams(BaseModel):
    """Tunables for a PCA job."""
    k: int = 10  # number of principal components to keep


class SimilarityParams(BaseModel):
    """Tunables for a similarity-analysis job."""
    metric: str = "cosine"  # similarity metric name understood by the service
    threshold: Optional[float] = None  # optional cutoff; None presumably keeps all pairs — confirm
    sample_size: int = 1000  # number of rows considered (per service semantics)


class OutlierDetectionParams(BaseModel):
    """Tunables for an outlier-detection job."""
    method: str = "distance"  # detection strategy name (service-defined)
    threshold: float = 2.0  # cutoff; exact semantics defined by the service


class NearestNeighborsParams(BaseModel):
    """Inputs for a nearest-neighbors search job."""
    query_vector: List[float]  # vector to search around
    k: int = 10  # number of neighbors to return
    metric: str = "euclidean"  # distance metric name


class GeospatialUploadRequest(BaseModel):
    """Metadata accompanying a geospatial dataset upload."""
    name: str  # display name for the dataset
    layer_name: Optional[str] = None  # layer to read; None presumably lets the service choose — confirm
    feature_type: str = "centroid"  # how geometries become features (service-defined)
    vector_columns: Optional[List[str]] = None  # optional explicit feature columns
    description: Optional[str] = None
    file_format: str = "gdb"  # GDB / GeoPackage / Shapefile per endpoint docs


class SpatialClusteringParams(BaseModel):
    """Tunables for a spatial clustering job."""
    k: int = 3  # number of clusters
    max_iter: int = 100  # iteration cap
    seed: int = 42  # RNG seed for reproducible runs
    include_spatial_metrics: bool = True  # also compute spatial quality metrics (service-defined)


class SpatialSimilarityParams(BaseModel):
    """Tunables for a spatial similarity job blending location and features."""
    spatial_weight: float = 0.3  # weight of the spatial component
    feature_weight: float = 0.7  # weight of the feature component (defaults sum to 1.0)
    threshold: Optional[float] = None  # optional cutoff (service-defined)
    sample_size: int = 1000  # number of rows considered (per service semantics)


@app.on_event("startup")
async def startup_event():
    """Initialize services on startup."""
    logger.info("Starting Vector Analytics API...")
    # Note: Data service will be initialized lazily on first use
    # This avoids blocking startup if Spark is not available
    logger.info("API started successfully (data service will be initialized on demand)")


@app.on_event("shutdown")
async def shutdown_event():
    """Cleanup on shutdown."""
    logger.info("Shutting down Vector Analytics API...")
    if data_service:
        data_service.cleanup()
    logger.info("API shutdown complete")


# Health check
@app.get("/health")
async def health_check():
    """Health check endpoint."""
    # Check if data service is available without forcing initialization
    service_status = "available" if data_service is not None else "not_initialized"

    return {
        "status": "healthy",
        "timestamp": datetime.now().isoformat(),
        "service": "vector-analytics-api",
        "data_service": service_status
    }


# Dataset endpoints
@app.post("/api/v1/datasets/upload", response_model=DatasetInfo)
async def upload_dataset(
    file: UploadFile = File(...),
    request: DatasetUploadRequest = Depends()
):
    """Upload a new dataset."""
    try:
        # Save uploaded file temporarily
        with tempfile.NamedTemporaryFile(delete=False, suffix=f"_{file.filename}") as temp_file:
            content = await file.read()
            temp_file.write(content)
            temp_file_path = temp_file.name

        # Process with data service
        service = get_data_service()
        dataset_info = service.upload_dataset(
            file_path=temp_file_path,
            name=request.name,
            vector_columns=request.vector_columns,
            description=request.description,
            file_format=request.file_format
        )

        # Clean up temp file
        os.unlink(temp_file_path)

        return dataset_info

    except Exception as e:
        logger.error(f"Dataset upload failed: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


@app.get("/api/v1/datasets", response_model=List[DatasetInfo])
async def list_datasets():
    """List all datasets."""
    try:
        service = get_data_service()
        datasets = service.list_datasets()
        return datasets
    except Exception as e:
        logger.error(f"Failed to list datasets: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/api/v1/datasets/{dataset_id}", response_model=DatasetInfo)
async def get_dataset(dataset_id: str):
    """Get dataset information."""
    try:
        service = get_data_service()
        datasets = service.list_datasets()
        dataset = next((d for d in datasets if d.id == dataset_id), None)

        if not dataset:
            raise HTTPException(status_code=404, detail="Dataset not found")

        return dataset
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to get dataset {dataset_id}: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.delete("/api/v1/datasets/{dataset_id}")
async def delete_dataset(dataset_id: str):
    """Delete a dataset."""
    try:
        service = get_data_service()
        # Implementation would depend on your storage system
        # This is a placeholder
        return {"message": f"Dataset {dataset_id} deleted successfully"}
    except Exception as e:
        logger.error(f"Failed to delete dataset {dataset_id}: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# Geospatial data endpoints
@app.post("/api/v1/datasets/upload-geospatial", response_model=DatasetInfo)
async def upload_geospatial_dataset(
    file: UploadFile = File(...),
    request: GeospatialUploadRequest = Depends()
):
    """Upload a geospatial dataset (GDB, GeoPackage, Shapefile)."""
    try:
        # Save uploaded file temporarily
        with tempfile.NamedTemporaryFile(delete=False, suffix=f"_{file.filename}") as temp_file:
            content = await file.read()
            temp_file.write(content)
            temp_file_path = temp_file.name

        # Process with data service
        service = get_data_service()
        dataset_info = service.upload_geospatial_dataset(
            file_path=temp_file_path,
            name=request.name,
            layer_name=request.layer_name,
            feature_type=request.feature_type,
            vector_columns=request.vector_columns,
            description=request.description,
            file_format=request.file_format
        )

        # Clean up temp file
        if os.path.exists(temp_file_path):
            if os.path.isdir(temp_file_path):
                import shutil
                shutil.rmtree(temp_file_path)
            else:
                os.unlink(temp_file_path)

        return dataset_info

    except Exception as e:
        logger.error(f"Geospatial dataset upload failed: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


@app.get("/api/v1/datasets/{dataset_id}/geospatial-metadata")
async def get_geospatial_metadata(dataset_id: str):
    """Get geospatial metadata for a dataset."""
    try:
        service = get_data_service()
        datasets = service.list_datasets()
        dataset = next((d for d in datasets if d.id == dataset_id), None)

        if not dataset:
            raise HTTPException(status_code=404, detail="Dataset not found")

        if not dataset.metadata or not dataset.metadata.get("geospatial"):
            raise HTTPException(status_code=400, detail="Dataset is not geospatial")

        metadata = service.get_geospatial_metadata(dataset.file_path)
        return metadata

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to get geospatial metadata: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# Job endpoints
@app.post("/api/v1/jobs/submit", response_model=JobInfo)
async def submit_job(request: JobSubmissionRequest, background_tasks: BackgroundTasks):
    """Submit a computation job."""
    try:
        service = get_data_service()
        job_info = service.submit_job(
            job_type=request.job_type,
            input_dataset_id=request.input_dataset_id,
            parameters=request.parameters,
            name=request.name
        )

        # Queue job for background processing
        background_tasks.add_task(process_job_background, job_info.id)

        return job_info

    except Exception as e:
        logger.error(f"Job submission failed: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


@app.get("/api/v1/jobs/{job_id}", response_model=JobInfo)
async def get_job_status(job_id: str):
    """Get job status."""
    try:
        service = get_data_service()
        job_info = service.get_job_status(job_id)

        if not job_info:
            raise HTTPException(status_code=404, detail="Job not found")

        return job_info
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to get job status {job_id}: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/api/v1/jobs", response_model=List[JobInfo])
async def list_jobs():
    """List all jobs."""
    try:
        service = get_data_service()
        # Implementation would depend on your job storage system
        # This is a placeholder
        return []
    except Exception as e:
        logger.error(f"Failed to list jobs: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/api/v1/jobs/{job_id}/result")
async def get_job_result(job_id: str):
    """Get job result."""
    try:
        service = get_data_service()
        job_info = service.get_job_status(job_id)

        if not job_info:
            raise HTTPException(status_code=404, detail="Job not found")

        if job_info.status != "completed":
            raise HTTPException(status_code=400, detail="Job not completed")

        if not job_info.result_path:
            raise HTTPException(status_code=404, detail="Result not found")

        # Load result from file
        import json
        with open(job_info.result_path, 'r') as f:
            result = json.load(f)

        return result

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to get job result {job_id}: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# Specific job type endpoints
@app.post("/api/v1/vector/kmeans")
async def kmeans_clustering(
    dataset_id: str,
    params: KMeansParams,
    background_tasks: BackgroundTasks
):
    """Submit K-means clustering job."""
    request = JobSubmissionRequest(
        job_type="kmeans",
        input_dataset_id=dataset_id,
        parameters=params.dict()
    )
    return await submit_job(request, background_tasks)


@app.post("/api/v1/vector/pca")
async def pca_analysis(
    dataset_id: str,
    params: PCAParams,
    background_tasks: BackgroundTasks
):
    """Submit PCA analysis job."""
    request = JobSubmissionRequest(
        job_type="pca",
        input_dataset_id=dataset_id,
        parameters=params.dict()
    )
    return await submit_job(request, background_tasks)


@app.post("/api/v1/vector/similarity")
async def similarity_analysis(
    dataset_id: str,
    params: SimilarityParams,
    background_tasks: BackgroundTasks
):
    """Submit similarity analysis job."""
    request = JobSubmissionRequest(
        job_type="similarity",
        input_dataset_id=dataset_id,
        parameters=params.dict()
    )
    return await submit_job(request, background_tasks)


@app.post("/api/v1/vector/outliers")
async def outlier_detection(
    dataset_id: str,
    params: OutlierDetectionParams,
    background_tasks: BackgroundTasks
):
    """Submit outlier detection job."""
    request = JobSubmissionRequest(
        job_type="outlier_detection",
        input_dataset_id=dataset_id,
        parameters=params.dict()
    )
    return await submit_job(request, background_tasks)


@app.post("/api/v1/vector/statistics")
async def vector_statistics(
    dataset_id: str,
    background_tasks: BackgroundTasks
):
    """Submit vector statistics job."""
    request = JobSubmissionRequest(
        job_type="statistics",
        input_dataset_id=dataset_id,
        parameters={}
    )
    return await submit_job(request, background_tasks)


@app.post("/api/v1/vector/nearest-neighbors")
async def nearest_neighbors(
    dataset_id: str,
    params: NearestNeighborsParams,
    background_tasks: BackgroundTasks
):
    """Submit nearest neighbors search job."""
    request = JobSubmissionRequest(
        job_type="nearest_neighbors",
        input_dataset_id=dataset_id,
        parameters=params.dict()
    )
    return await submit_job(request, background_tasks)


# Real-time analysis endpoints (synchronous)
@app.post("/api/v1/vector/kmeans/realtime")
async def kmeans_clustering_realtime(
    dataset_id: str,
    params: KMeansParams
):
    """Execute K-means clustering synchronously."""
    try:
        service = get_data_service()
        result = service.execute_job_sync(
            "kmeans",
            dataset_id,
            params.dict()
        )
        return result
    except Exception as e:
        logger.error(f"Real-time K-means failed: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


@app.post("/api/v1/vector/statistics/realtime")
async def vector_statistics_realtime(dataset_id: str):
    """Execute vector statistics synchronously."""
    try:
        service = get_data_service()
        result = service.execute_job_sync(
            "statistics",
            dataset_id,
            {}
        )
        return result
    except Exception as e:
        logger.error(f"Real-time statistics failed: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


# Spatial analysis endpoints
@app.post("/api/v1/spatial/clustering")
async def spatial_clustering(
    dataset_id: str,
    params: SpatialClusteringParams,
    background_tasks: BackgroundTasks
):
    """Submit spatial clustering job."""
    request = JobSubmissionRequest(
        job_type="spatial_clustering",
        input_dataset_id=dataset_id,
        parameters=params.dict()
    )
    return await submit_job(request, background_tasks)


@app.post("/api/v1/spatial/similarity")
async def spatial_similarity(
    dataset_id: str,
    params: SpatialSimilarityParams,
    background_tasks: BackgroundTasks
):
    """Submit spatial similarity analysis job."""
    request = JobSubmissionRequest(
        job_type="spatial_similarity",
        input_dataset_id=dataset_id,
        parameters=params.dict()
    )
    return await submit_job(request, background_tasks)


@app.post("/api/v1/spatial/statistics")
async def spatial_statistics(
    dataset_id: str,
    background_tasks: BackgroundTasks
):
    """Submit spatial statistics job."""
    request = JobSubmissionRequest(
        job_type="geospatial_statistics",
        input_dataset_id=dataset_id,
        parameters={}
    )
    return await submit_job(request, background_tasks)


@app.post("/api/v1/spatial/clustering/realtime")
async def spatial_clustering_realtime(
    dataset_id: str,
    params: SpatialClusteringParams
):
    """Execute spatial clustering synchronously."""
    try:
        service = get_data_service()
        result = service.execute_spatial_clustering(dataset_id, params.dict())
        return result
    except Exception as e:
        logger.error(f"Real-time spatial clustering failed: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


@app.post("/api/v1/spatial/similarity/realtime")
async def spatial_similarity_realtime(
    dataset_id: str,
    params: SpatialSimilarityParams
):
    """Execute spatial similarity analysis synchronously."""
    try:
        service = get_data_service()
        result = service.execute_spatial_similarity(dataset_id, params.dict())
        return result
    except Exception as e:
        logger.error(f"Real-time spatial similarity failed: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


@app.post("/api/v1/spatial/statistics/realtime")
async def spatial_statistics_realtime(dataset_id: str):
    """Execute spatial statistics synchronously."""
    try:
        service = get_data_service()
        result = service.execute_geospatial_statistics(dataset_id)
        return result
    except Exception as e:
        logger.error(f"Real-time spatial statistics failed: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


# Helper functions
async def process_job_background(job_id: str):
    """Process job in background."""
    try:
        service = get_data_service()
        service.execute_job(job_id)
        logger.info(f"Job {job_id} completed successfully")
    except Exception as e:
        logger.error(f"Job {job_id} failed: {str(e)}")


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)