import os
import json
import torch
import datetime
import torch.nn as nn
import torch.backends.cudnn
import torch.cuda.comm as comm
from tqdm import tqdm as tqdm
from torch.utils.data import DataLoader
import numpy as np
from typing import Optional, Tuple, List
from catalog.place.place_api import PlaceAPI, PlaceRecord
from common.geometry import Geom
from common.geoimage.raster_dataset import RasterDataset

from analytics.crop_recognition.datasets.inference_dataset import (
    TileDataset,
)
from analytics.crop_recognition.models.model_saving_obj import (
    ModelSavingObject,
    FMT_DATETIME,
)
from analytics.crop_recognition.common_settings.data_config import (
    SUPPORTED_TEMPORAL_OFFSETS,
)
import analytics.crop_recognition.common_settings.model_config as config


class CropRecognitionInference:
    """Multi-GPU, tile-based crop-type inference.

    For each MGRS tile the predictor writes three artifacts into
    ``save_folder``: a per-timestep class GeoTIFF, a per-timestep confidence
    GeoTIFF, and a scene-meta JSON used for ingestion.
    """

    def __init__(
        self,
        save_folder,
        gpu_ids: Optional[Tuple[int]],
        patch_size=1024,
        num_workers=4,
        thread_num=2,
    ) -> None:
        """
        Initialization of the CropRecognition Inference class.
        The result will be saved as tile inference result and saved in the
        save_folder path. Currently, gpu_ids must be given.

        Parameters
        ----------
        save_folder : str
            Where to save the inference result. The save_folder must exist before
            initializing the inference class.
        gpu_ids : Optional[Tuple[int]]
            Which GPUs to use. Currently, only GPU inference is supported;
            if None is given as input, an Exception is raised.
        patch_size : int, optional
            The inference patch size of the raster, recommended to use multiples
            of 256, which is the default block size to save raster in satellite
            platform, by default 1024
        num_workers : int, optional
            How many workers (processes) to use for building input data, by default 4
        thread_num : int, optional
            How many threads to use for extracting raster images, by default 2

        Raises
        ------
        Exception
            If ``save_folder`` does not exist, or ``gpu_ids`` is None
            (CPU inference is not supported).
        """

        if not os.path.exists(save_folder):
            raise Exception(
                "Target folder not found, please create target folder first."
            )
        if gpu_ids is None:
            # Fail fast as documented: the scatter/replicate calls used for
            # inference require CUDA devices, so CPU-only runs are unsupported.
            raise Exception(
                "gpu_ids must be given, CPU-only inference is not supported."
            )
        if isinstance(gpu_ids, list):
            gpu_ids = tuple(gpu_ids)
        self.gpu_ids = gpu_ids
        self.save_folder = save_folder
        # TODO: patch size should be determined in inference func.
        # Basically, for different model and time length, the patch size
        # should be different. An idea is to keep trying different patch size,
        # to fit maximum GPU memory until the best one is selected.
        self.patch_size = patch_size
        self.num_workers = num_workers
        self.thread_num = thread_num

    def update_savefolder(self, new_savefolder):
        """Redirect subsequent tile outputs to ``new_savefolder``."""
        self.save_folder = new_savefolder

    def inference_model_with_dataset_gpu(self, replicas, dataset: TileDataset):
        """
        Run data-parallel GPU inference over one tile dataset.

        Parameters
        ----------
        replicas : list
            Model replicas produced by ``nn.parallel.replicate``, aligned
            with ``self.gpu_ids`` (same order).
        dataset : TileDataset
            Patch dataset covering a single tile; each item is one batch of
            patch pixels plus its validity mask.

        Returns
        -------
        Tuple[RasterDataset, RasterDataset]
            Class-index raster and confidence raster, both uint8 with the
            time dimension moved to bands.
        """
        # torch.backends.cudnn.enabled = False
        # For LSTM / RNN inference, enable cudnn will not fully utilize GPU memory.

        dataloader = DataLoader(
            dataset, 1, False, num_workers=self.num_workers, drop_last=False
        )
        data_timelen = dataset.time_len
        result_rst_meta = dataset.get_dataset_meta()
        result_arr_class = np.zeros(
            (result_rst_meta.n_rows, result_rst_meta.n_cols, data_timelen),
            dtype=np.uint8,
        )
        result_arr_confidence = np.zeros(
            (result_rst_meta.n_rows, result_rst_meta.n_cols, data_timelen),
            dtype=np.uint8,
        )
        # Pre-bind loop names so the cleanup `del`s below cannot raise
        # NameError when the dataset yields no batches.
        input_x = input_x_mask = outputs = class_res = confidence_res = None
        with torch.no_grad():
            for idx, (input_x, input_x_mask) in enumerate(dataloader):
                cur_src_window = dataset.get_src_window(idx)
                input_x = torch.squeeze(input_x, dim=0)  # make it in collate function
                input_x_mask = torch.squeeze(
                    input_x_mask, dim=0
                )  # make it in collate function
                # Split the batch across GPUs, run each replica on its shard,
                # then gather the per-GPU outputs back on the CPU.
                input_x = comm.scatter(input_x, devices=self.gpu_ids)
                input_x_mask = comm.scatter(input_x_mask, devices=self.gpu_ids)
                cur_replicas = replicas[: len(input_x)]
                outputs = nn.parallel.parallel_apply(
                    cur_replicas, list(zip(input_x, input_x_mask))
                )  # N, T, C
                outputs = comm.gather(outputs, destination="cpu")
                confidence_res, class_res = torch.max(outputs, dim=-1)  # N, T, C
                class_res = (
                    torch.reshape(
                        class_res,
                        (
                            cur_src_window.y_size,
                            cur_src_window.x_size,
                            data_timelen,
                        ),
                    )
                    .numpy()
                    .astype(np.uint8)
                )
                # Scale to percent before the uint8 cast (assumes model
                # outputs are probabilities in [0, 1] — TODO confirm).
                confidence_res = confidence_res * 100
                confidence_res = (
                    torch.reshape(
                        confidence_res,
                        (cur_src_window.y_size, cur_src_window.x_size, data_timelen),
                    )
                    .numpy()
                    .astype(np.uint8)
                )

                row_start = cur_src_window.y_off
                row_end = cur_src_window.y_off + cur_src_window.y_size

                col_start = cur_src_window.x_off
                col_end = cur_src_window.x_off + cur_src_window.x_size

                result_arr_class[row_start:row_end, col_start:col_end] = class_res
                result_arr_confidence[
                    row_start:row_end, col_start:col_end
                ] = confidence_res

        # Drop the last batch's tensor references before releasing cached
        # GPU memory.
        del input_x
        del outputs
        del class_res
        del confidence_res
        torch.cuda.empty_cache()
        # (H, W, T) -> (T, H, W): band-major layout expected by RasterDataset.
        result_arr_class = np.moveaxis(result_arr_class, -1, 0)
        result_arr_confidence = np.moveaxis(result_arr_confidence, -1, 0)
        class_rst = RasterDataset.from_ndarray(
            result_arr_class, result_rst_meta.update(n_bands=data_timelen)
        )
        confidence_rst = RasterDataset.from_ndarray(
            result_arr_confidence, result_rst_meta.update(n_bands=data_timelen)
        )
        return class_rst, confidence_rst

    def __check_skip_flag(
        self, tif_class_fpath, tif_confidence_fpath, json_fpath, skip_exists
    ):
        """
        Check whether to skip the current tile.

        A tile is skipped only when all three outputs (class tif,
        confidence tif, scene-meta json) already exist AND ``skip_exists``
        is True. When they all exist but ``skip_exists`` is False, the
        stale outputs are deleted so the tile is re-processed. A partial
        set of outputs is never treated as complete.
        """

        skip_flag = False
        if (
            os.path.exists(tif_class_fpath)
            and os.path.exists(tif_confidence_fpath)
            and os.path.exists(json_fpath)
        ):
            skip_flag = True

            if not skip_exists:
                if os.path.exists(tif_class_fpath):
                    os.remove(tif_class_fpath)

                if os.path.exists(tif_confidence_fpath):
                    os.remove(tif_confidence_fpath)

                if os.path.exists(json_fpath):
                    os.remove(json_fpath)
                skip_flag = False

        return skip_flag

    def __build_scene_meta(
        self,
        predict_year,
        inference_temporal: Tuple[datetime.datetime, datetime.datetime],
        crop_type_list,
        mgrs_tile,
        processed_time: datetime.datetime,
    ):
        """
        Build the ingestion scene-meta dict for one tile/year.

        Label indices follow ``class_of_interests``: 0 is "nodata", 1 is
        "negative", and crop types start at index 2.
        """
        scene_meta = {
            "class_of_interests": ["nodata", "negative"] + crop_type_list,
            "uid": "{}_{}".format(mgrs_tile, predict_year),
            "year": str(predict_year),
            "start_datetime": inference_temporal[0].strftime(FMT_DATETIME),
            "end_datetime": inference_temporal[1].strftime(FMT_DATETIME),
            "mgrs_tile": mgrs_tile,
            "processed_datetime": processed_time.strftime(FMT_DATETIME),
            "image_assets": {
                "class": f"{mgrs_tile}_{predict_year}_class.tif",
                "confidence": f"{mgrs_tile}_{predict_year}_confidence.tif",
            },
        }

        return scene_meta

    def predict_aoi(
        self,
        predict_year: int,
        crop_type_list: List[str],
        model_path: str,
        geom_aoi: Geom = None,
        mgrs_list: List[str] = None,
        skip_exists=True,
    ):
        """
        predict the AOI for specific crop type at specific year.
        Output will be uint 16 raster. Number of label value will be 2 +
        len(crop_type_list), where 0 is preserved as Nodata and 1 is preserved
        as negative. The others follow the index of crop_type_list.

        Parameters
        ----------
        predict_year : int
            In which year the crop is planted. For winter wheat, for example
            planted in Oct 2021 and harvest in Apr 2022, please use 2021 as
            argument.
        crop_type_list : List[str]
            List of crop types, for example ["soybean", "corn"], all the crop
            types should be supported by the model, which is defined in the
            model parameter file. Otherwise, Exception will raise.
            Matching against the model's classes is case-insensitive.
        model_path : str
            Path the model parameter path, must be given. If model path doesn't
            fit predefined model, exception will raise.
        geom_aoi : Geom
            Geom of Area of Interest.
        mgrs_list: List[str]
            List of MGRS tiles to be inferenced.
        skip_exists: bool
            Whether to skip the exists files.
        """

        assert not (
            geom_aoi is None and mgrs_list is None
        ), "Set at least one of geom_aoi or mgrs_list"

        model_meta_obj: ModelSavingObject = torch.load(model_path, map_location="cpu")
        print(
            f"now inferencing with {model_meta_obj.get_model_name()} for {model_meta_obj.get_region()}"
        )
        class_of_interests = model_meta_obj.get_class_of_interests()

        # Membership is checked case-insensitively; lookups below must use
        # the lower-cased name as well.
        for crop_type in crop_type_list:
            assert crop_type.lower() in class_of_interests

        model = model_meta_obj.get_model_instance()
        # ----- determine time period for inference ----- #
        # - "origin": starting from Jan 1st, working for summer/spring crops
        # - "shifted": starting from Aug 1st, working for winter crops
        temporal_type = model_meta_obj.data_dict["temporal_type"]
        temporal_offset = SUPPORTED_TEMPORAL_OFFSETS[temporal_type]
        predict_year_start_date = datetime.datetime(predict_year, 1, 1)
        inference_start_date = predict_year_start_date + temporal_offset[0]
        inference_temporal = (
            inference_start_date,
            # Never ask for imagery from the future.
            min(
                predict_year_start_date + temporal_offset[1],
                datetime.datetime.now(),
            ),
        )
        # ----------------------------------------------- #

        model.to(torch.device("cuda:{}".format(self.gpu_ids[0])))
        model.eval()

        replicas = nn.parallel.replicate(model, self.gpu_ids)

        if mgrs_list is None:
            mgrs_tileset = PlaceAPI.get_mgrs_set_by_geom(geom_aoi)
            mgrs_list = sorted(list(mgrs_tileset))
        target_geom = geom_aoi
        for _, mgrs_tile in enumerate(tqdm(mgrs_list)):
            dest_tif_class_fpath = os.path.join(
                self.save_folder, f"{mgrs_tile}_{predict_year}_class.tif"
            )
            dest_tif_confidence_fpath = os.path.join(
                self.save_folder, f"{mgrs_tile}_{predict_year}_confidence.tif"
            )
            dest_json_fpath = os.path.join(
                self.save_folder, f"{mgrs_tile}_{predict_year}.json"
            )
            skip_flag = self.__check_skip_flag(
                dest_tif_class_fpath,
                dest_tif_confidence_fpath,
                dest_json_fpath,
                skip_exists,
            )
            if skip_flag:
                continue

            cur_dataset = TileDataset(
                mgrs_tile,
                inference_temporal,
                self.patch_size,
                model_meta_obj.get_data_delta_timeslot(),
                target_geom,
                asset_names=model_meta_obj.get_data_asset_names(),
                thread_num=self.thread_num,
            )

            cur_rst_class, cur_rst_confidence = self.inference_model_with_dataset_gpu(
                replicas, cur_dataset
            )

            # Remap model class indices to output labels: everything starts
            # as 1 ("negative"); requested crops get labels 2, 3, ...
            cur_arr = cur_rst_class.data
            result_arr = np.ones_like(cur_arr, dtype=np.uint16)
            for target_idx, crop_type in enumerate(crop_type_list, 2):
                # Use the lower-cased name — the same form validated above —
                # so mixed-case inputs cannot raise ValueError here.
                inference_idx = class_of_interests.index(crop_type.lower())
                result_arr[
                    cur_arr == inference_idx
                ] = target_idx  # TODO: Use array operation

            result_rst_class = RasterDataset.from_ndarray(
                result_arr, cur_rst_class.meta
            )
            if target_geom is not None:
                # Pixels outside the AOI become 0 ("nodata").
                result_rst_class = result_rst_class.paint(target_geom, 0, inverse=True)
                cur_rst_confidence = cur_rst_confidence.paint(
                    target_geom, 0, inverse=True
                )

            result_rst_class.to_geotiff(dest_tif_class_fpath)
            cur_rst_confidence.to_geotiff(dest_tif_confidence_fpath)
            # Build scene_meta for ingestion
            model_time = datetime.datetime.fromtimestamp(os.path.getmtime(model_path))
            scene_meta = self.__build_scene_meta(
                predict_year, inference_temporal, crop_type_list, mgrs_tile, model_time
            )
            with open(dest_json_fpath, "w") as f:
                json.dump(scene_meta, f, indent=4)

        # Drop every replica reference so the cached GPU memory can actually
        # be released below.
        for idx in range(len(replicas)):
            replicas[idx] = None

        del replicas
        del model
        del model_meta_obj
        torch.cuda.empty_cache()
        total_memory = sum([torch.cuda.memory_allocated(i) for i in self.gpu_ids])
        print(
            "total memory usage after clearing cache: {} MB".format(
                total_memory / (2 ** 20)
            )
        )


if __name__ == "__main__":
    # Output / checkpoint locations derive from the active model config.
    save_folder = f"/NAS6/Members/linchenxi/projects/crop_recognition/inference/{config.region}/{config.name}"
    os.makedirs(save_folder, exist_ok=True)
    model_path = f"/NAS6/Members/linchenxi/projects/crop_recognition/checkpoints/{config.region}/{config.name}/saved_checkpoint_best.pth"  # noqa:E501
    gpu_ids = [0, 1, 2, 3, 4, 5, 6, 7]
    # Copy before mutating so the shared config list is not changed in place.
    crop_type = list(config.class_of_interests)
    crop_type.remove("negative")
    crop_type = [crop.lower() for crop in crop_type]
    target_region = config.liaoning
    # Union of all town geometries forms the AOI used to mask the outputs.
    geom_aoi = Geom.create_an_empty_geom()
    for town in target_region:
        town_item = PlaceRecord.query_many_items(target_name=town)
        geom_aoi = geom_aoi.union(town_item[0].geom)
    mgrs_set = set()
    for region in target_region:
        mgrs_set = mgrs_set.union(
            PlaceAPI.get_mgrs_set_with_place_name(
                target_name=region, region_level=target_region[region]
            )
        )
    shard_num = 1  # total machines
    cur_idx = 0  # machine index
    print(f"inference with idx {cur_idx} of shard {shard_num}")

    for year in [2023]:
        cur_output_folder = os.path.join(save_folder, str(year))
        os.makedirs(cur_output_folder, exist_ok=True)
        # List the output folder once (instead of once per tile) and skip
        # tiles whose scene-meta JSON already exists.
        existing_files = set(os.listdir(cur_output_folder))
        filter_mgrs_set = {
            tile for tile in mgrs_set if f"{tile}_{year}.json" not in existing_files
        }
        inferencer = CropRecognitionInference(
            cur_output_folder, gpu_ids, patch_size=1000
        )

        # Shard the sorted tile list across machines by striding.
        inferencer.predict_aoi(
            year,
            crop_type,
            model_path=model_path,
            geom_aoi=geom_aoi,
            mgrs_list=sorted(filter_mgrs_set)[cur_idx::shard_num],
        )
