# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''Module providing dataset functions'''
import os
import abc
import bisect

import numpy as np

import mindspore.dataset as ds
from mindspore.communication import get_rank, get_group_size

from mindspore import context

# Named meteorological features mapped to index pairs; presumably
# (channel_index, level_index) into a raw feature array — negative values
# index from the end. Not used in this file; TODO confirm against callers.
FEATURE_DICT = {'Z500': (7, 0), 'T850': (10, 2), 'U10': (-3, 0), 'T2M': (-1, 0)}
# Grid (height, width) in cells for each supported grid resolution in degrees.
SIZE_DICT = {0.25: [721, 1440], 0.5: [360, 720], 1.4: [128, 256]}

class Data:
    """
    This class is the base class of Dataset.

    Subclasses must implement ``__getitem__`` and ``__len__`` so instances can
    back a random-accessible ``mindspore.dataset.GeneratorDataset``.

    Args:
        root_dir (str, optional): The root dir of input data. Default: ".".

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self, root_dir="."):
        # Root directory containing the raw dataset files.
        self.root_dir = root_dir

    @abc.abstractmethod
    def __getitem__(self, index):
        """Defines behavior for when an item is accessed. Return the corresponding element for given index."""
        # Use the concrete class name: the previous code referenced
        # `self.dataset_type`, an attribute never set anywhere, so the call
        # raised AttributeError instead of the intended NotImplementedError.
        raise NotImplementedError(
            "{}.__getitem__ not implemented".format(type(self).__name__))

    @abc.abstractmethod
    def __len__(self):
        """Return length of dataset"""
        raise NotImplementedError(
            "{}.__len__ not implemented".format(type(self).__name__))


class ECMWFIFS(Data):
    """
    Dataset generator for ECMWF-IFS style sea-ice data (sea-ice concentration,
    thickness and temperature), producing (inputs, labels) pairs consumable by
    ``mindspore.dataset.GeneratorDataset``. This class inherits the Data class.

    Samples are monthly ``.npy`` files laid out as
    ``<root_dir>/<dataset_name>/<run_mode>/<year>/sic_sit_temp_<MM>.npy``.
    Normalization statistics and the grid mask are read from
    ``<root_dir>/OSI_ASTE_statistic``.

    Args:
        data_params (dict): dataset-related configuration of the model. Must
            provide ``root_dir``, ``t_in``, ``feature_dims``, ``mask_dims``,
            ``grid_num``, ``pred_lead_time``, ``data_frequency`` and, for each
            run mode, ``<mode>_dataset`` (mapping dataset name to
            ``[start_year, end_year]``) and ``t_out_<mode>``.
        run_mode (str, optional): whether the dataset is used for training,
            evaluation or testing. Supports ["train", "valid", "test"].
            Default: 'train'.

    Supported Platforms:
        ``Ascend`` ``GPU``
    """

    def __init__(self,
                 data_params,
                 run_mode='train'):
        super(ECMWFIFS, self).__init__(data_params['root_dir'])
        self.run_mode = run_mode
        self.data_params = data_params
        self.t_in = data_params.get('t_in')
        self.feature_dims = data_params.get('feature_dims')
        self.mask_dims = data_params.get('mask_dims')
        self.grid_num = data_params.get('grid_num')
        self.pred_lead_time = data_params.get('pred_lead_time')
        self.data_frequency = data_params.get('data_frequency')
        # Statistics (mean/std/mask) live in a fixed subdirectory of root_dir.
        self.statistic_dir = os.path.join(data_params['root_dir'], "OSI_ASTE_statistic")
        self.mask = self._process_mask()
        self._get_statistic()
        if run_mode in ['train', 'valid', 'test']:
            self.dataset_list = list(data_params.get(f"{run_mode}_dataset").keys())
            self.t_out = data_params.get(f't_out_{run_mode}')
            self.dataset_cfg = self.data_params.get(f'{run_mode}_dataset')
            self.start_year_month = self.get_start_year_month(self.dataset_list, self.dataset_cfg)

    def get_start_year_month(self, dataset_list, dataset_cfg):
        """Return a (start_year, start_month) tuple per dataset.

        The start month is always January; the start year is the first entry
        of the dataset's configured ``[start_year, end_year]`` period.
        """
        return [(dataset_cfg.get(name)[0], 1) for name in dataset_list]

    def _process_mask(self):
        """Load the grid mask and add a middle axis so it can be concatenated
        with the (time, channel, grid) feature arrays along axis 1."""
        mask = np.load(os.path.join(self.statistic_dir, 'mask_unique.npy'))
        mask = np.expand_dims(mask, axis=1)
        return mask

    def _get_statistic(self):
        """Load per-channel mean/std used for normalization from statistic_dir."""
        self.mean = np.load(os.path.join(self.statistic_dir, self.data_params.get('mean_file_name', 'sic_sit_temp_mean_2002_2012.npy')))
        self.std = np.load(os.path.join(self.statistic_dir, self.data_params.get('std_file_name', 'sic_sit_temp_std_2002_2012.npy')))

    def _normalize(self, x):
        """Standardize ``x`` with the preloaded mean and std."""
        return (x - self.mean) / self.std

    def _get_file_count(self):
        """Count usable samples across all configured datasets.

        Side effect: rebuilds ``self.dataset_num``, the cumulative usable
        sample count per dataset, used by ``__getitem__`` to locate which
        dataset a global index falls into.
        """
        count = 0
        self.dataset_num = list()
        for dataset in self.dataset_list:
            period = self.dataset_cfg.get(dataset)
            cur_data_path = os.path.join(self.root_dir, dataset, self.run_mode)
            file_list = os.listdir(cur_data_path)
            cur_mode_data = 0
            for f in file_list:
                # Only year directories inside the configured period count.
                if period[0] <= int(f) <= period[1]:
                    tmp_lst = os.listdir(os.path.join(cur_data_path, f))
                    cur_data_num = len(tmp_lst)
                    # Explicit raise instead of `assert`: asserts are stripped
                    # under `python -O`, silently skipping this validation.
                    if cur_data_num <= 0:
                        raise ValueError(f"For `{dataset}`, the numbers should be greater than 0.")
                    cur_mode_data += cur_data_num
            # A sample needs t_in inputs plus t_out labels shifted by
            # pred_lead_time, so the tail of each dataset is unusable.
            cur_mode_length = cur_mode_data - (self.t_in + self.t_out + self.pred_lead_time - 1)
            count += cur_mode_length
            self.dataset_num.append(count)
        return count

    def __len__(self):
        """Return the number of usable samples (also refreshes dataset_num)."""
        return self._get_file_count()

    def get_year_month(self, idx, data_idx):
        """Map a dataset-relative month index to its ``.npy`` file path.

        ``idx`` counts months from the dataset's configured start; month 0 of
        year Y is December of Y-1 under the modulo arithmetic, hence the
        wrap-around fix below.
        """
        start_year, start_month = self.start_year_month[data_idx]
        total_month = idx + start_month
        year = total_month // 12 + start_year
        month = total_month % 12
        if not month:
            # month 0 means December of the previous year.
            month = 12
            year = year - 1
        file_name = f'{year}/sic_sit_temp_{str(month).zfill(2)}.npy'
        return file_name

    def _relative_index(self, cur_idx, data_idx):
        """Convert a global sample index to one relative to dataset data_idx."""
        if data_idx == 0:
            return cur_idx
        return cur_idx - self.dataset_num[data_idx - 1]

    def __getitem__(self, idx):
        """Return (inputs, labels) for the given global sample index.

        inputs: (grid_num, t_in * (feature_dims + mask_dims)) float32 array.
        labels: (t_out, feature_dims-ish, grid_num) float32 array (shape of
        the raw monthly files; no mask concatenated).
        """
        inputs_lst = []
        label_lst = []
        # dataset_num is cumulative, so bisect finds which dataset idx is in.
        data_idx = bisect.bisect_left(self.dataset_num, idx + 1)
        for t in range(self.t_in):
            cur_input_data_idx = idx + t
            file_name = self.get_year_month(self._relative_index(cur_input_data_idx, data_idx), data_idx)
            x = np.load(os.path.join(self.root_dir, self.dataset_list[data_idx], self.run_mode, file_name)).astype(np.float32)
            x = self._normalize(x)
            x = np.concatenate((x, self.mask), axis=1)
            inputs_lst.append(x)
        x = np.stack(inputs_lst, axis=0).astype(np.float32)
        # Flatten (t_in, channels) into one feature axis per grid point.
        x = x.transpose(1, 0, 2).reshape(self.grid_num, self.t_in * (self.feature_dims + self.mask_dims))
        for t in range(self.t_out):
            # Labels start pred_lead_time - 1 steps after the last input.
            cur_label_data_idx = idx + self.t_in + t + self.pred_lead_time - 1
            file_name = self.get_year_month(self._relative_index(cur_label_data_idx, data_idx), data_idx)
            label = np.load(os.path.join(self.root_dir, self.dataset_list[data_idx], self.run_mode, file_name)).astype(np.float32)
            label = self._normalize(label)
            label_lst.append(label)
        labels = np.stack(label_lst, axis=0).astype(np.float32)

        return x, labels
    
class Dataset:
    """
    Wrap a data generator into a batched ``mindspore.dataset.GeneratorDataset``
    for training, validation and testing.

    Args:
        dataset_generator (Data): the data generator of weather dataset;
            must be random-accessible when shuffling is enabled.
        distribute (bool, optional): whether or not to perform parallel
            training. Default: False.
        num_workers (int, optional): number of workers(threads) used both for
            reading and for batching the data. Default: 1.
        shuffle (bool, optional): whether or not to perform shuffle on the
            dataset. Default: True.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> from mindearth.data import Era5Data, Dataset
        >>> dataset_generator = Era5Data(data_params)
        >>> dataset = Dataset(dataset_generator)
        >>> train_dataset = dataset.create_dataset(1)
    """

    def __init__(self,
                 dataset_generator, distribute=False, num_workers=1, shuffle=True):
        self.dataset_generator = dataset_generator
        self.distribute = distribute
        self.num_workers = num_workers
        self.shuffle = shuffle
        # Rank info is only needed (and only valid) in distributed mode.
        if distribute:
            self.rank_id = get_rank()
            self.rank_size = get_group_size()

    def create_dataset(self, batch_size):
        """
        create dataset.

        Args:
            batch_size (int): An int number of rows each batch is created with.

        Returns:
            BatchDataset, dataset batched (incomplete final batch dropped).
        """
        ds.config.set_prefetch_size(1)
        generator = ds.GeneratorDataset(self.dataset_generator,
                                        ['inputs', 'labels'],
                                        shuffle=self.shuffle,
                                        num_parallel_workers=self.num_workers)
        if self.distribute:
            sampler = ds.DistributedSampler(self.rank_size, self.rank_id)
            generator.use_sampler(sampler)
        return generator.batch(batch_size=batch_size,
                               drop_remainder=True,
                               num_parallel_workers=self.num_workers)


if __name__ == "__main__":
    # Smoke test: build the training dataset and iterate it once,
    # printing input/label shapes for every batch.
    print(f"pid: {os.getpid()}")
    np.set_printoptions(threshold=np.inf)
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=2)
    data_params = {
                    'name': 'ECMWF-IFS',
                    'root_dir': '/home/ma-user/work/pro_data',
                    'feature_dims': 49,
                    'mask_dims': 1,
                    't_in': 1,
                    't_out_train': 1,
                    't_out_valid': 1,
                    't_out_test': 1,
                    'pred_lead_time': 6,
                    'data_frequency': 1,
                    'train_dataset':
                    {
                        'OSI_grid_ECMWF-IFS-HR': [1950, 1951],
                        'OSI_grid_EC-Earth3P-HR': [1950, 2011],
                        'OSI_grid_CESM1-CAM5-SE-HR': [1950, 1951],
                        'OSI_ASTE': [2002, 2011]
                    },
                    'valid_dataset':
                    {
                        'OSI_grid_ECMWF-IFS-HR': [1950, 1951],
                        'OSI_grid_EC-Earth3P-HR': [1950, 1951],
                        'OSI_grid_CESM1-CAM5-SE-HR': [1950, 1951],
                    },
                    'test_dataset':
                    {
                        'OSI_grid_ECMWF-IFS-HR': [1950, 1951],
                        'OSI_grid_EC-Earth3P-HR': [1950, 1951],
                        'OSI_grid_CESM1-CAM5-SE-HR': [1950, 1951],
                    },
                    'batch_size': 1,
                    'grid_num': 55056
                    }
    dataset_generator = ECMWFIFS(data_params, run_mode="train")
    dataset = Dataset(dataset_generator, distribute=False, shuffle=False)
    dataset = dataset.create_dataset(data_params.get('batch_size'))
    print(dataset.get_dataset_size())
    for data in dataset.create_dict_iterator():
        inputs = data['inputs'].asnumpy()
        labels = data['labels'].asnumpy()
        print(f'inputs shape:{inputs.shape}, labels shape:{labels.shape}')
