from __future__ import annotations

import os
from typing import Union, Optional, Tuple, Dict, List, Any, Iterator

import geopandas as gpd
from pyproj import CRS
from shapely.geometry.base import BaseGeometry

from common.geometry.bbox import BoundingBox
from common.geometry.geom import Geom


# Mapping from supported file extension to the OGR driver name passed to
# geopandas for reading/writing that format. Only these three formats
# (GeoJSON, ESRI Shapefile, GeoPackage) are supported by this module.
EXT_TO_DRIVER = {
    ".json": "GeoJSON",
    ".geojson": "GeoJSON",
    ".shp": "ESRI Shapefile",
    ".gpkg": "GPKG",
}


def _parse_epsg(epsg_str: str) -> int:
    """Parse epsg from str to int number.

    NOTE: might move to another place for interfacing all SRS representations later
    """
    epsg_str = epsg_str.lower()
    if epsg_str.startswith("epsg:"):
        epsg_no = int(epsg_str.split(":")[-1])
    else:
        epsg_no = int(epsg_str)
    return epsg_no


class VectorDataset(object):
    """An in-memory vector dataset backed by a `geopandas.GeoDataFrame`.

    Wraps a GeoDataFrame and exposes file I/O, CRS management, and
    attribute/spatial querying helpers.
    """

    def __init__(self, gdf: gpd.GeoDataFrame):
        # Underlying GeoDataFrame holding all features of the dataset.
        self._gdf = gdf

    @staticmethod
    def from_file(
        file_path: str,
        bbox: Optional[BoundingBox] = None,
        aoi: Optional[Union[Geom, BaseGeometry]] = None,
        rows: Optional[Union[int, slice]] = None,
        layer: Optional[str] = None,
        encoding: str = "utf-8",
    ) -> VectorDataset:
        """Create a vector dataset by reading from a given file

        Parameters
        ----------
        file_path : str
            File path to the vector dataset's underlying file,
            only support `geojson`, `gpkg` and `shp` file formats
            (extension matched case-insensitively).
        bbox : Optional[BoundingBox], optional
            Bounding box for the data to read, by default None not to do spatial partial read.
        aoi : Optional[Union[Geom, BaseGeometry]], optional
            Area of interest specified as a geometry, by default None not to do spatial partial read.
        rows : Optional[Union[int, slice]], optional
            Row(s) to read, could be a single integer number for the row index, or a slice for the row range,
            by default None not to do row based partial read.
        layer : Optional[str], optional
            Name of layer to read, only meaningful when using `shp` file format (layer name)
            or `gpkg` file (table name), by default None.
        encoding : str, optional
            Encoding used to read the given file, by default "utf-8".

        Returns
        -------
        VectorDataset
            The `VectorDataset` read into the memory.

        Raises
        ------
        ValueError
            Raised when the given file is not with a supported format.
            Raised when both `bbox` and `aoi` are specified.
            Raised when the given `aoi` is not supported type of object.
        """
        # only support shp, geojson, and gpkg; lower-case the extension so
        # that e.g. ".SHP" is accepted as well.
        ext_name = os.path.splitext(file_path)[-1].lower()
        if ext_name not in EXT_TO_DRIVER:
            raise ValueError("Not supported format {}".format(ext_name))

        if bbox is not None and aoi is not None:
            raise ValueError("Can only specify one of `bbox` or `aoi`.")

        # `geopandas.read_file` expects the bbox as (minx, miny, maxx, maxy).
        tuple_bbox: Optional[Tuple] = (
            (bbox.left, bbox.bottom, bbox.right, bbox.upper)
            if bbox is not None
            else None
        )

        if aoi is None:
            mask = None
        elif isinstance(aoi, Geom):
            mask = aoi._shapely_geom
        elif isinstance(aoi, BaseGeometry):
            mask = aoi
        else:
            raise ValueError("`aoi` must be a `Geom` or a `BaseGeometry` object.")

        gdf = gpd.read_file(
            file_path,
            bbox=tuple_bbox,
            mask=mask,
            rows=rows,
            layer=layer,
            encoding=encoding,
        )

        return VectorDataset(gdf)

    def to_file(
        self,
        file_path: str,
        layer: Optional[str] = None,
        encoding: str = "utf-8",
        append: bool = False,
    ):
        """Save the vector dataset to a file.

        Parameters
        ----------
        file_path : str
            Output file path (extension matched case-insensitively).
        layer : Optional[str], optional
            Layer name, notice it is only valid for `shapefile` or `gpkg` file format, by default None.
        encoding : str, optional
            Encoding to be used, by default "utf-8".
        append : bool, optional
            Whether to operate under `append` mode,
            notice it is only valid for the `shapefile` format, by default False.

        Raises
        ------
        ValueError
            Raised if the given output `file_path` is not a supported file format.
            Raised if using the `append` mode with a file that is not a `shapefile` format.
        """
        # only support shp, geojson, and gpkg
        ext_name = os.path.splitext(file_path)[-1].lower()
        if ext_name not in EXT_TO_DRIVER:
            raise ValueError("Not supported format {}".format(ext_name))

        if append and ext_name != ".shp":
            raise ValueError("only support `append` mode with ESRI Shapefile.")

        write_kwargs: Dict[str, Any] = {
            "driver": EXT_TO_DRIVER[ext_name],
            "layer": layer,
            "encoding": encoding,
        }
        if append:
            # `mode="a"` appends to the existing shapefile instead of overwriting.
            write_kwargs["mode"] = "a"
        self._gdf.to_file(file_path, **write_kwargs)

    def to_json(self, show_bbox: bool = False, drop_id: bool = True, **kwargs) -> str:
        """Convert to json string

        Parameters
        ----------
        show_bbox : bool, optional
            Whether to include a `bbox` field, by default False not to include.
        drop_id : bool, optional
            Whether to drop the row id, by default True to drop.

        Returns
        -------
        str
            The resulting json string.
        """
        # Missing values are always serialized as JSON null.
        return self._gdf.to_json(
            na="null", show_bbox=show_bbox, drop_id=drop_id, **kwargs
        )

    def to_ogr_vector(self):
        raise NotImplementedError("not impl yet")

    def __len__(self) -> int:
        """Number of rows in the vector dataset"""
        return len(self._gdf)

    @property
    def __geo_interface__(self) -> Dict:
        """Represent the vector dataset as a GeoJSON-like FeatureCollection Python dictionary,
        following geointerface protocol.

        NOTE: this is a property (not a method) as the geointerface protocol
        expects attribute access; `GeoDataFrame.__geo_interface__` is itself a
        property, so the previous implementation called a dict and raised
        TypeError.
        """
        return self._gdf.__geo_interface__

    def iterfeatures(
        self, na: str = "null", show_bbox: bool = False, drop_id: bool = False
    ) -> Iterator[Dict]:
        """Iterate through the vector dataset via an iterator yields each row as a Python dictionary
        following geointerface protocol

        Parameters
        ----------
        na : str, optional
            How to output missing values, could be one of "null" (use JSON NULL),
            "drop" (remove that property from the output dictionary),
            or "keep" (using NaN), by default "null".
        show_bbox : bool, optional
            Whether to include bbox in the output, by default False.
        drop_id : bool, optional
            Whether to drop row id, by default False.

        Yields
        -------
        Dict
            yield dictionary representing each row
        """
        yield from self._gdf.iterfeatures(na=na, show_bbox=show_bbox, drop_id=drop_id)

    @property
    def bounds(self, overall=True) -> Union[BoundingBox, List[BoundingBox]]:
        """Compute bounding box(es) of the vector dataset

        Parameters
        ----------
        overall : bool, optional
            Overall bounding box for the whole vector dataset
            or bounding box for each row, by default True.
            NOTE: as a property this is always accessed without arguments,
            so attribute access always returns the overall bounding box.

        Returns
        -------
        Union[BoundingBox, List[BoundingBox]]
            Bounding box(es)
        """
        # `GeoDataFrame.bounds` is a property (DataFrame of minx/miny/maxx/maxy),
        # not a callable.
        df_bounds = self._gdf.bounds
        if overall:
            return BoundingBox(
                left=df_bounds.minx.min(),
                bottom=df_bounds.miny.min(),
                right=df_bounds.maxx.max(),
                upper=df_bounds.maxy.max(),
            )
        # One BoundingBox per row (`itertuples`, not `itertuple`).
        return [
            BoundingBox(
                left=item.minx,
                bottom=item.miny,
                right=item.maxx,
                upper=item.maxy,
            )
            for item in df_bounds.itertuples()
        ]

    @property
    def centroid(self, overall=True) -> Union[Geom, List[Geom]]:
        """Compute centroid(s) for the vector dataset.

        Parameters
        ----------
        overall : bool, optional
            Overall centroid for the whole dataset,
            or centroid for each row, by default True.
            NOTE: as a property this is always accessed without arguments,
            so attribute access always returns the overall centroid.

        Returns
        -------
        Union[Geom, List[Geom]]
            The computed centroid(s).
        """
        if overall:
            # dissolve() merges all rows into a single geometry, whose
            # centroid is the overall centroid of the dataset.
            return Geom.from_shapely(self._gdf.dissolve().centroid[0])
        # `.items()` replaces `.iteritems()`, which was removed in pandas 2.0.
        return [Geom.from_shapely(geom) for _, geom in self._gdf.centroid.items()]

    @property
    def columns(self):
        """Columns in the vector dataset"""
        return self._gdf.columns

    @property
    def crs(self) -> CRS:
        """Coordinate reference system used by the vector dataset"""
        return self._gdf.crs

    def set_crs(
        self, epsg: Union[str, int], inplace: bool = False, allow_override: bool = False
    ) -> VectorDataset:
        """Set the vector dataset to use a CRS specified by a epsg code.

        Parameters
        ----------
        epsg : Union[str, int]
            The EPSG code given.
        inplace : bool, optional
            Operate in place or create a new vector dataset, by default False
        allow_override : bool, optional
            Whether to allow override existing CRS, by default False.

        Returns
        -------
        VectorDataset
            The vector dataset with CRS set.
        """
        if isinstance(epsg, str):
            epsg = _parse_epsg(epsg)

        if inplace:
            self._gdf.set_crs(epsg=epsg, inplace=True, allow_override=allow_override)
            return self
        new_gdf = self._gdf.set_crs(
            epsg=epsg, inplace=False, allow_override=allow_override
        )
        return VectorDataset(new_gdf)

    def warp(self, epsg: Union[str, int], inplace: bool = False) -> VectorDataset:
        """Warp the vector dataset to the given CRS.

        Parameters
        ----------
        epsg : Union[str, int]
            EPSG code for the destination CRS.
        inplace : bool, optional
            Whether to operate in place or create a new vector dataset, by default False

        Returns
        -------
        VectorDataset
            The warped vector dataset.
        """
        if isinstance(epsg, str):
            epsg = _parse_epsg(epsg)

        if inplace:
            self._gdf.to_crs(epsg=epsg, inplace=True)
            return self
        new_gdf = self._gdf.to_crs(epsg=epsg, inplace=False)
        return VectorDataset(new_gdf)

    def slice(self, columns: List[str]) -> VectorDataset:
        """Slice the vector dataset by selecting only given columns and create a new one.

        Parameters
        ----------
        columns : List[str]
            Column names to be selected.

        Returns
        -------
        VectorDataset
            The sliced vector dataset.
        """
        new_gdf = self._gdf[columns]
        return VectorDataset(new_gdf)

    def subset(self, rows: Union[int, slice]) -> VectorDataset:
        """Subset a vector dataset by selecting designated row(s) and create a new one.

        Parameters
        ----------
        rows : Union[int, slice]
            Row(s) to be selected, could be an integer to take the first
            `rows` rows (consistent with `geopandas.read_file`'s `rows`),
            or a slice for the row range.

        Returns
        -------
        VectorDataset
            The subsetted vector dataset.

        Raises
        ------
        TypeError
            Raised when `rows` is neither an int nor a slice.
        """
        if isinstance(rows, slice):
            new_gdf = self._gdf.iloc[rows]
        elif isinstance(rows, int):
            new_gdf = self._gdf.iloc[:rows]
        else:
            # Previously this fell through and raised UnboundLocalError.
            raise TypeError(
                "`rows` must be an int or a slice, got {}".format(type(rows))
            )

        return VectorDataset(new_gdf)

    def query(self, conditions: List[Tuple[str, str, Any]]) -> VectorDataset:
        """Query a vector dataset by given conditions and create a new vector dataset.

        All conditions are combined with logical AND.

        Parameters
        ----------
        conditions : List[Tuple[str, str, Any]]
            A list of conditions, each condition is a tuple of (field_name, operator, field_querying_value).
            `field_name` must be a valid column name within the vector dataset.
            `operator` must be a supported operator, including: `eq`, `neq`, `gt`, `gte`, `lt`, `lte`, `contains`,
            `startswith`, `endswith`, and `inlist`.

        Returns
        -------
        VectorDataset
            The resulting vector dataset for the selected subset.

        Raises
        ------
        ValueError
            Raised when an unsupported operator is given.
        """
        # following GEE style, not sure if that is the best way but I think
        # we could start from here
        operators = {
            "eq": lambda col, v: col == v,
            "neq": lambda col, v: col != v,
            "gt": lambda col, v: col > v,
            "gte": lambda col, v: col >= v,
            "lt": lambda col, v: col < v,
            "lte": lambda col, v: col <= v,
            "contains": lambda col, v: col.str.contains(v),
            "startswith": lambda col, v: col.str.startswith(v),
            "endswith": lambda col, v: col.str.endswith(v),
            "inlist": lambda col, v: col.isin(v),
        }

        def _gen_mask(field_name, op, value):
            # Previously an unknown operator raised UnboundLocalError;
            # fail loudly with a ValueError instead.
            try:
                op_func = operators[op]
            except KeyError:
                raise ValueError("Unsupported operator: {}".format(op)) from None
            return op_func(self._gdf[field_name], value)

        field_name, op, value = conditions[0]
        mask = _gen_mask(field_name, op, value)
        for field_name, op, value in conditions[1:]:
            mask &= _gen_mask(field_name, op, value)
        new_gdf = self._gdf[mask]
        return VectorDataset(new_gdf)

    def spatial_query(
        self,
        aoi: Union[Geom, BaseGeometry],
        predicate: Optional[str] = None,
        sort: bool = False,
    ) -> VectorDataset:
        """Query a vector database using a spatial query condition and create a new vector dataset.

        Parameters
        ----------
        aoi : Union[Geom, BaseGeometry]
            Area of interest.
        predicate : Optional[str], optional
            Geometry relationship for the query, could be one of None, "intersects", "within",
            "contains", "overlaps", "crosses", and "touches".
            By default None, with all geometries intersecting with the **envelope** of the `aoi` being returned.
        sort : bool, optional
            Whether to sort the result in ascending order, by default False.

        Returns
        -------
        VectorDataset
            The resulting vector dataset for the subset.
        """
        aoi_shapely = aoi._shapely_geom if isinstance(aoi, Geom) else aoi
        # The spatial index query returns positional indices of matching rows.
        new_gdf_inds = self._gdf.sindex.query(
            aoi_shapely, predicate=predicate, sort=sort
        )
        new_gdf = self._gdf.iloc[new_gdf_inds]
        return VectorDataset(new_gdf)

    def clip(
        self, aoi: Union[Geom, BaseGeometry], keep_geom_type: bool = False
    ) -> VectorDataset:
        """Clip to the given area of interest and create a new vector dataset.

        Parameters
        ----------
        aoi : Union[Geom, BaseGeometry]
            The given area of interest.
        keep_geom_type : bool, optional
            Whether to keep the geometry type after clipping, by default False not to keep.

        Returns
        -------
        VectorDataset
            The clipped vector dataset.
        """
        aoi_shapely = aoi._shapely_geom if isinstance(aoi, Geom) else aoi
        new_gdf = gpd.clip(self._gdf, aoi_shapely, keep_geom_type=keep_geom_type)
        return VectorDataset(new_gdf)

    def explode(
        self, ignore_index: bool = False, index_parts: Optional[bool] = None
    ) -> VectorDataset:
        """Manipulate a vector dataset, by exploding multi-part geometry into multiple single-part geometry

        Parameters
        ----------
        ignore_index : bool, optional
            Whether to ignore original index, by default False
        index_parts : Optional[bool], optional
            Whether to index the exploded parts, by default None

        Returns
        -------
        VectorDataset
            The resulting vector dataset.
        """
        new_gdf = self._gdf.explode(ignore_index=ignore_index, index_parts=index_parts)
        return VectorDataset(new_gdf)
