# -*- coding:utf-8 -*-

from datakeeper.dataset_mold import *
from datasets.utils.data import *
from datasets.utils.plot import *
from datasets.utils.fc_managers import *
from datasets.utils.fg_dataset_mold_base import *
from metlib.datetime import T, TD, chop_by_year
from metlib.kits import *
import json
import numpy as np
import pandas as pd
import re
import pandas.json as _json
from netCDF4 import Dataset
from depot.utils import *
from .fg_merra2_common import *
from collections import deque
from pydx.wind.stat import wind_stater
from metlib.wrf.wps import *

class FG_Merra2_DatasetBase(FG_DatasetMoldBase):
    """Common base for every MERRA-2 subset dataset class.

    Holds the URI layout, the Chinese display dictionaries and the
    spatial/temporal extent shared by all MERRA-2 subsets.
    """
    uri_fields = ['dataset', 'subset', 'varname', 'time', 'level', 'jy_ix']
    zipped_fields = set()
    uri_parser = parse_merra2_uri
    allow_RY_download_json = True
    zh_subset_d = merra2_zh_subset_d
    zh_varname_d = merra2_zh_varname_d

    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_Merra2_DatasetBase, self).__init__(name, uri, info, *args, **kwargs)
        # Optional domain description is forwarded verbatim to the Domainer.
        domain_extra = {}
        if 'domain_info' in info:
            domain_extra['domain_info'] = info['domain_info']
        self.domainer = Domainer(info['wps_namelist'], extra_info=domain_extra)
        # Temporal coverage of the dataset.
        self.begdt = T(info['begdt'])
        self.enddt = T(info['enddt'])
        self.years = info['years']
        self.default_year = info.get('default_year', self.years[-1])
        # Variable lists for the PY "summary" view: the default selection
        # and the full set offered to the user.
        self.py_summary_sub_varnames = info.get(
            'py_summary_sub_varnames', merra2_py_summary_sub_varnames)
        self.py_summary_sub_varnames_available = info.get(
            'py_summary_sub_varnames_available', self.py_summary_sub_varnames)
        self.zh_name = info.get('zh_name', name)
        self.root_path = kwargs.get('root_path', '')

    def uri_is_standard(self, uri):
        """Return True when exactly the expected URI fields are wildcarded."""
        wildcarded = set()
        for field, value in self.parse_uri(uri).iteritems():
            if value == '*':
                wildcarded.add(field)
        return wildcarded == self.zipped_fields


class FG_Merra2_PT_Dataset(FG_Merra2_DatasetBase):
    """MERRA-2 point timeseries (PT) subset.

    Hourly values for a single grid point are stored in one HDF5 file per
    (subset, year, point); ``get_data`` stitches the requested years
    together, converts storage units for display and shifts the index
    into the requested timezone.
    """

    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_Merra2_PT_Dataset, self).__init__(name, uri, info, *args, **kwargs)
        self.varnames = merra2_pt_varnames

    def get_schema(self, request=None, *args, **kwargs):
        """Return the schema tree describing every PT timeseries variable."""
        res = {
            'arch': 'subset',
            'name': 'PT',
            'type': '',
            'subs': [],
        }
        for vn in merra2_pt_varnames:
            levels = merra2_levels_d.get(vn, [])
            level_default = merra2_levels_default.get(vn, [levels[0]])
            res['subs'].append({
                'arch': 'variable',
                'name': vn,
                'vartype': 'T',
                'datatype': 'timeseries',
                'formats': ['json', 'csv', 'hc_figure'],
                "coords": [{
                    "name": "time",
                    "values": self.years,
                    "default": [self.default_year]
                }, {
                    "name": "level",
                    "values": levels,
                    "default": level_default,
                    "can_poly": True,
                }, {
                    "name": "jy_ix",
                    # Placeholders substituted later by the template layer.
                    "values": "<%= jy_ix_values %>",
                    "default": "<%= jy_ix_default %>"
                }]
            })
        return res

    @cache_data
    @merra2_point_grid_geometry
    def get_data(self, uri, request=None, *args, **kwargs):
        """Return the point timeseries addressed by *uri*.

        Keyword args:
            datatype: 'datapack' (default) for the full pack,
                'dataframe' for the raw pandas DataFrame, anything else
                for the first dataunit of the pack.
            uri_info: pre-parsed URI dict; parsed from *uri* when absent.
            utc_offset: fallback timezone offset in hours when the URI
                carries none (default 8, i.e. Beijing time).

        Returns an empty dict when no year yielded any data.

        Raises:
            GetDataError: wrapping any unexpected failure.
        """
        datatype = kwargs.get('datatype', 'datapack')
        uri_info = kwargs.get('uri_info', None)

        if uri_info is None:
            uri_info = parse_merra2_uri(uri)

        try:
            try:
                if uri_info['timezone'] != 0:
                    utc_offset = uri_info['timezone']
                else:
                    utc_offset = int(kwargs.get('utc_offset', 8))
                toffset = TD('%sh' % utc_offset)
            except ValueError as e:
                raise BadDataParameter(unicode(e))

            # The stores are indexed in UTC: move the requested local
            # window back to UTC before selecting.
            if utc_offset != 0:
                uri_info['begdt'] -= toffset
                uri_info['enddt'] -= toffset

            subset_uri = uri_info['subset']

            varname = uri_info['varname']
            levels = uri_info['level'].split(',')
            results = []
            splits = chop_by_year(uri_info['begdt'], uri_info['enddt'])
            for year, beg, end in splits:
                try:
                    s3uri = 'merra2/%s/ALL/%s/ALL/%s.h5' % (
                        subset_uri,
                        year,
                        uri_info['jy_ix']
                    )
                    fc = raw_tiny_fc_manager.get(s3uri, when_not_exist=['fetch'])
                    fname = fc.filepath
                    colnames = ['%s_%s' % (varname, level) for level in levels]
                    cond = "index >= '{:%Y-%m-%d %H:%M:%S}' & index < '{:%Y-%m-%d %H:%M:%S}' & columns = {}".format(
                        beg, end, colnames)
                    # try/finally so the store is closed even when select()
                    # raises (the handle used to leak on error here).
                    h5f = pd.HDFStore(fname)
                    try:
                        df = h5f.select('df', cond)
                    finally:
                        h5f.close()
                    if len(df) > 0:
                        # Storage units -> display units.
                        if varname == 't':
                            df -= 273.15   # K -> degC
                        if varname == 'qv':
                            df *= 1000     # kg/kg -> g/kg
                        if varname in ('slp', 'psfc'):
                            df /= 100      # Pa -> hPa
                        results.append(df)
                except Exception as e:
                    # Best effort: a missing or broken year is skipped so
                    # the remaining years can still be served.
                    pass
            if len(results) == 0:
                return {}
            semifinal_df = pd.concat(results, axis=0)
            semifinal_df.sort_index(inplace=True)  # sort
            semifinal_df = semifinal_df.groupby(level=0).first()  # drop duplicate timestamps

            # TODO: limit beg, end
            final_df = semifinal_df

            # Shift the UTC index into the requested local time.
            utc_dts = final_df.index.to_pydatetime()
            dts = utc_dts + toffset
            final_df.index = dts
            final_df.index.name = 'datetime'

            # One sub-URI per requested level.
            suburi_dict = {}
            for level in levels:
                vl = '%s_%s' % (varname, level)
                suburi = '%s/%s/%s/%s/%s/%s' % (
                    uri_info['dataset'],
                    uri_info['subset'],
                    uri_info['varname'],
                    uri_info['time'],
                    level,
                    uri_info['jy_ix'],
                )
                suburi_dict[vl] = suburi

            if datatype == 'dataframe':
                res = final_df
            else:
                tags = {}
                contents = {}
                for level in levels:
                    vl = '%s_%s' % (varname, level)
                    suburi = suburi_dict[vl]
                    subdata = final_df[vl].values
                    tags[suburi] = suburi
                    if varname == 'wspd':
                        suggest_range = (0.0, 10.0)
                    else:
                        suggest_range = merra2_suggest_range_d.get(varname, (None, None))
                    contents[suburi] = {
                        "type": "dataunit",
                        "uri": suburi,
                        "info": {
                            "varname": varname,
                            "type": "timeseries",
                            "zh_varname": merra2_zh_varname_d.get(varname, varname),
                        },
                        "coords": {
                            "varname": varname,
                            "level": level,
                        },
                        "data": {
                            "values": subdata,
                            "dts": dts,
                            "begdt": dts[0],
                            "interval": 3600,
                            "utc_offset": utc_offset,
                            "suggest_range": suggest_range,
                            "units": merra2_units_d.get(varname, ''),
                        }
                    }

                # TODO: tdelta handling
                final_pack = {
                    "type": "datapack",
                    "uri": uri,
                    "tags": tags,
                    "contents": contents
                }

                if datatype == 'datapack':
                    res = final_pack
                else:
                    # Any other datatype: return the first dataunit.
                    res = final_pack['contents'].values()[0]

        except Exception as e:
            raise GetDataError(unicode(e))
        return res

    @support_csv
    @support_packunitjson
    def get_file(self, uri, dest, request=None, *args, **kwargs):
        """File export; the actual work is done by the decorators."""
        pass


class FG_Merra2_PY_Dataset(FG_Merra2_DatasetBase):
    """MERRA-2 point yearly statistics (PY) subset.

    Serves per-point yearly products: 'summary' (yearly aggregates of
    several variables), 'rose' / 'wpdrose' (wind and wind-power-density
    roses derived from the PT timeseries) and 'dist' (wind speed
    distribution with a Weibull fit).
    """
    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_Merra2_PY_Dataset, self).__init__(name, uri, info, *args, **kwargs)
        self.varnames = merra2_py_varnames

    def get_schema(self, request=None, *args, **kwargs):
        """Return the schema tree describing every PY variable.

        Poly-selection (multiple coordinate values at once) is only
        enabled for the variables that can merge across years/levels.
        """
        res = {
            'arch': 'subset',
            'name': 'PY',
            'type': '',
            'subs': [],
        }
        for vn in self.varnames:
            if vn in ['summary', 'dist', 'rose', 'wpdrose']:
                time_can_poly = True
            else:
                time_can_poly = False
            if vn in ['dist', 'rose', 'wpdrose']:
                level_can_poly = True
            else:
                level_can_poly = False

            # Export formats depend on the product type.
            if vn == 'summary':
                formats = ['json']
            elif vn in ['dist', 'wpdrose']:
                formats = ['json', 'hc_figure']
            elif vn == 'rose':
                formats = ['json', 'wws', 'hc_figure']
            else:
                formats = ['json']
            levels = merra2_levels_d.get(vn, [])
            level_default = merra2_levels_default.get(vn, [levels[0]])

            if vn == 'summary':
                # 'summary' spans all levels; the UI shows a single
                # pseudo-level which is mapped back to 'all'.
                level_coord = {
                    "name": "level",
                    "values": [u'全部'],
                    "default": [u'全部'],
                    "replace_name": "level",
                    "replace_dict": {u"全部": 'all'}
                }
            else:
                level_coord = {
                    "name": "level",
                    "values": levels,
                    "default": level_default,
                    "can_poly": level_can_poly
                }

            coords = [
                {
                    "name": "time",
                    "values": self.years,
                    "default": [self.default_year],
                    "can_poly": time_can_poly
                },
                level_coord,
                {
                    "name": "jy_ix",
                    # Placeholders substituted later by the template layer.
                    "values": "<%= jy_ix_values %>",
                    "default": "<%= jy_ix_default %>"
                }
            ]

            res['subs'].append({
                'arch': 'variable',
                'name': vn,
                'vartype': 'T',
                'datatype': 'yearlystat',
                'formats': formats,
                'zipped': False,
                "coords": coords
            })
        return res

    @merra2_py_add_kwargs
    @cache_data
    @merra2_point_grid_geometry
    @pack_data
    def get_data(self, uri, request=None, *args, **kwargs):
        """Return yearly-statistics data for *uri*.

        Dispatches on the URI varname: 'summary' reads the yearly stat
        NetCDF per sub-variable; 'rose'/'wpdrose' and 'dist' rebuild
        their products from the sibling PT timeseries subset.

        Raises:
            GetDataError: wrapping any unexpected failure.
        """
        datatype = kwargs.get('datatype', 'dataunit')
        uri_info = kwargs.get('uri_info', None)

        if uri_info is None:
            uri_info = parse_merra2_uri(uri)

        try:
            varname = uri_info['varname']
            # Raw rose values are in permill, hence the fallback unit.
            if varname == 'rose' and datatype == 'rawrose':
                units = merra2_units_d.get(varname, u'‰')
            else:
                units = merra2_units_d.get(varname, '')
            if varname == 'summary':
                # Front-end parameter key under which the user's selection
                # of summary sub-variables is stored.
                key = "dataset:{}:summary_sub_vars".format(self.topset.name)
                sub_vn_available = self.py_summary_sub_varnames_available
                if 'sub_varnames' in kwargs:
                    sub_varnames = kwargs['sub_varnames'].split(',')
                else:
                    sub_varnames = self.py_summary_sub_varnames
                # choices = [(vn, merra2_zh_varname_d.get(vn, vn), vn in sub_varnames) for vn in sub_vn_available]

                # Sub-variable names may carry a level suffix
                # ('<var>:<level>'); bare names default to the surface.
                sub_var_info = {}
                choices = []
                for vn in sub_vn_available:
                    tks = vn.split(':', 1)
                    if len(tks) == 2:
                        base_vn, level = tks
                        l_postfix = u' (地面)' if level == 'sfc' else u' ({}m)'.format(level)
                    else:
                        base_vn, level = tks[0], 'sfc'
                        l_postfix = u''
                    zh_base_vn = merra2_zh_varname_d.get(base_vn, base_vn)
                    zh_final_vn = zh_base_vn + l_postfix
                    choices.append((vn, zh_final_vn, vn in sub_varnames))
                    sub_var_info[vn] = (base_vn, level, zh_final_vn)
                varnames = sub_varnames
                res_type = 'summary'
                res = {
                        "type": "dataunit",
                        "uri": uri,
                        "info": {  # descriptive info
                            "varname": varname,
                            "zh_varname": merra2_zh_varname_d.get(varname, varname),
                            "type": res_type,
                            "sub_varnames": varnames,
                            "para_conf": [{
                                        "name": "test",
                                        "zhname": u"数据集选项",
                                        "paras": [
                                            {
                                                "name": key,
                                                "zhname": u"年汇总变量",
                                                "type": "multichoice",
                                                "choices": choices
                                            }
                                        ]
                                    }]
                        },
                        "data": {}
                    }
                jy, ix = uri_info['jy_ix'].split('_')
                # r = r'(?P<vn>\D+)(?P<level>\d+)'
                for vn in varnames:
                    base_vn, level, zhname = sub_var_info[vn]
                    s3uri = 'merra2/PY/stat/%s/%s/ALL.nc' % (uri_info['time'],
                                                             level)
                    fc = raw_tiny_fc_manager.get(s3uri, when_not_exist=['fetch'])
                    fname = fc.filepath
                    with Dataset(fname) as ds:
                        data = {
                            "name": vn,
                            "zh_name": zhname,
                            "count": ds.variables['count'][0],
                            "units": merra2_units_d.get(vn, '')
                        }
                        # Collect mean/std/min/max; a missing NetCDF
                        # variable yields None for that statistic.
                        for final_name, postfix in [
                            ('values', ''),
                            ('std', '_std'),
                            ('min', '_min'),
                            ('max', '_max'),
                        ]:
                            current_vn = base_vn + postfix
                            if current_vn in ds.variables:
                                v = ds.variables[current_vn]
                                # NOTE(review): jy/ix come from split() as
                                # strings — confirm netCDF4 accepts string
                                # indices here (same pattern in PM/PD).
                                values = v[0, jy, ix]
                                # Storage units -> display units; std is
                                # offset-invariant, so no Kelvin shift.
                                if base_vn == 't':
                                    if final_name not in ['std']:
                                        values -= 273.15
                                elif base_vn == 'qv':
                                    values *= 1000.0
                                elif base_vn in ('slp', 'psfc'):
                                    values /= 100.0
                            else:
                                values = None
                            data[final_name] = values

                        res['data'][vn] = data
            elif varname in ('rose', 'wpdrose'):
                # Roses are rebuilt from the PT timeseries of the same
                # point/level/period.
                wspd_uri = 'merra2/PT/wspd/%s/%s/%s' % (uri_info['time'],
                                                        uri_info['level'],
                                                        uri_info['jy_ix'])

                wdir_uri = 'merra2/PT/wdir/%s/%s/%s' % (uri_info['time'],
                                                        uri_info['level'],
                                                        uri_info['jy_ix'])

                pt_subset = self.parent.uri_subsets['PT']
                wspd = pt_subset.get_data(wspd_uri, datatype='dataframe')
                wdir = pt_subset.get_data(wdir_uri, datatype='dataframe')
                ws = np.array(wspd['wspd_%s' % uri_info['level']])
                wd = np.array(wdir['wdir_%s' % uri_info['level']])
                if varname == 'rose':
                    value = wind_stater.get_wind_rose(ws, wd, 'permill')
                elif varname == 'wpdrose':
                    wpd_uri = 'merra2/PT/wpd/%s/%s/%s' % (uri_info['time'],
                                                          uri_info['level'],
                                                          uri_info['jy_ix'])
                    wpd = pt_subset.get_data(wpd_uri, datatype='dataframe')
                    wp = np.array(wpd['wpd_%s' % uri_info['level']])
                    value = wind_stater.get_wpd_rose(ws, wd, wp)
                # Regroup the stater's fine speed bins into the 7 display
                # bins below; 16 direction sectors of 22.5 degrees each.
                wsbins = ['<3', '3-5', '5-7', '7-9', '9-11', '11-13', '>13']
                wsbinnum = len(wsbins)
                wdbins = [0.0, 22.5, 45.0, 67.5, 90.0, 112.5, 135.0, 157.5,
                          180.0, 202.5, 225.0, 247.5, 270.0, 292.5, 315.0, 337.5]
                wdbinnum = len(wdbins)
                wdbin0 = wdbins[0]
                new_values = np.zeros((wsbinnum, wdbinnum))
                new_values[0] = np.sum(value[0:3], axis=0)
                new_values[1] = np.sum(value[3:5], axis=0)
                new_values[2] = np.sum(value[5:7], axis=0)
                new_values[3] = np.sum(value[7:9], axis=0)
                new_values[4] = np.sum(value[9:11], axis=0)
                new_values[5] = np.sum(value[11:13], axis=0)
                new_values[6] = np.sum(value[13:], axis=0)
                if varname == 'wpdrose':
                    new_values /= 1000.0
                res = {
                        "type": "dataunit",
                        "uri": uri,
                        "info": {  # descriptive info
                            "varname": varname,
                            "zh_varname": merra2_zh_varname_d.get(varname, varname),
                            "type": "rose",
                        },
                        "data": {
                            "wsbinnum": wsbinnum,
                            "wdbinnum": wdbinnum,
                            "wsbins": wsbins,
                            "wdbins": wdbins,
                            "wdbin0": wdbin0,
                            "values": new_values.tolist(),  # shape: (wsbinnum, wdbinnum)
                            "count": len(ws),
                            "units": units,
                        }
                    }
            elif varname == 'dist':
                # Wind speed distribution plus Weibull a/k fit, again
                # computed from the PT wspd timeseries.
                wspd_uri = 'merra2/PT/wspd/%s/%s/%s' % (uri_info['time'],
                                                        uri_info['level'],
                                                        uri_info['jy_ix'])
                pt_subset = self.parent.uri_subsets['PT']
                wspd = pt_subset.get_data(wspd_uri, datatype='dataframe')
                ws = np.array(wspd['wspd_%s' % uri_info['level']])
                wsdist = wind_stater.get_wsdist(ws)
                wsbins = wind_stater.WSBINS
                wsbinnum = len(wsbins)
                a, k = wind_stater.get_ak_gamma(wsdist)
                res = {
                    "type": "dataunit",
                    "uri": uri,
                    "info": {  # descriptive info
                        "varname": varname,
                        "zh_varname": merra2_zh_varname_d.get(varname, varname),
                        "type": "winddist",
                    },
                    "data": {
                        "wsbinnum": wsbinnum,
                        "wsbins": wsbins,
                        "wsdist": wsdist,
                        "weibull_a": a,
                        "weibull_k": k,
                        "count": len(ws),
                        "units": units,
                    }
                }
        except GetDataError as e:
            raise e
        except Exception as e:
            raise GetDataError(unicode(e))

        return res

    @support_wws
    @support_packunitjson
    def get_file(self, uri, dest, request=None, *args, **kwargs):
        """File export; the decorators implement the supported formats."""
        pass


class FG_Merra2_PM_Dataset(FG_Merra2_DatasetBase):
    """MERRA-2 point monthly statistics (PM) subset."""

    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_Merra2_PM_Dataset, self).__init__(name, uri, info, *args, **kwargs)
        self.varnames = merra2_pm_varnames

    def get_schema(self, request=None, *args, **kwargs):
        """Return the schema tree describing every PM monthly variable."""
        subs = []
        for vn in merra2_pm_varnames:
            levels = merra2_levels_d.get(vn, [])
            default_levels = merra2_levels_default.get(vn, [levels[0]])
            time_coord = {
                "name": "time",
                "values": self.years,
                "default": [self.default_year],
                "can_poly": True
            }
            level_coord = {
                "name": "level",
                "values": levels,
                "default": default_levels,
                "can_poly": True,
                "zipped": False
            }
            point_coord = {
                "name": "jy_ix",
                # Placeholders substituted later by the template layer.
                "values": "<%= jy_ix_values %>",
                "default": "<%= jy_ix_default %>"
            }
            subs.append({
                'arch': 'variable',
                'name': vn,
                'vartype': 'T',
                'datatype': 'monthlystat',
                'formats': ['json', 'hc_figure'],
                "coords": [time_coord, level_coord, point_coord]
            })
        return {
            'arch': 'subset',
            'name': 'PM',
            'type': '',
            'subs': subs
        }

    @cache_data
    @pack_data
    @merra2_point_grid_geometry
    def get_data(self, uri, request=None, *args, **kwargs):
        """Read the monthly stat arrays for one point and wrap a dataunit.

        Raises:
            GetDataError: wrapping any unexpected failure.
        """
        datatype = kwargs.get('datatype', 'dataunit')
        uri_info = kwargs.get('uri_info', None)
        if uri_info is None:
            uri_info = parse_merra2_uri(uri)

        try:
            s3uri = 'merra2/%s/stat/%s/%s/ALL.nc' % (
                uri_info['subset'], uri_info['time'], uri_info['level'])
            cached = raw_large_fc_manager.get(s3uri, when_not_exist=['fetch'])
            jy, ix = uri_info['jy_ix'].split('_')
            var_key = uri_info['varname']
            with Dataset(cached.filepath) as ds:
                data = {
                    "count": ds.variables['count'][:],
                    "units": merra2_units_d.get(var_key, ''),
                }
                # Collect mean/std/min/max; a missing NetCDF variable
                # yields None for that statistic.
                for out_key, suffix in [('values', ''), ('std', '_std'),
                                        ('min', '_min'), ('max', '_max')]:
                    nc_name = var_key + suffix
                    if nc_name not in ds.variables:
                        data[out_key] = None
                        continue
                    values = ds.variables[nc_name][:, jy, ix]
                    # Storage units -> display units; std is offset
                    # invariant, so it keeps the Kelvin scale unshifted.
                    if var_key == 't':
                        if out_key not in ['std']:
                            values -= 273.15
                    elif var_key == 'qv':
                        values *= 1000.0
                    elif var_key in ('slp', 'psfc'):
                        values /= 100.0
                    data[out_key] = values
            res = {
                "type": "dataunit",
                "uri": uri,
                "info": {  # descriptive info
                    "varname": var_key,
                    "zh_varname": merra2_zh_varname_d.get(var_key, var_key),
                    "type": "monthlymean",
                },
                "data": data
            }
        except GetDataError as e:
            raise e
        except Exception as e:
            raise GetDataError(unicode(e))

        return res

    @support_packunitjson
    def get_file(self, uri, dest, request=None, *args, **kwargs):
        """File export; the actual work is done by the decorator."""
        pass


class FG_Merra2_PD_Dataset(FG_Merra2_DatasetBase):
    """MERRA-2 point diurnal statistics (PD) subset.

    Statistics are stored per UTC hour; ``get_data`` rotates the
    per-hour arrays into the requested local timezone.
    """

    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_Merra2_PD_Dataset, self).__init__(name, uri, info, *args, **kwargs)
        self.varnames = merra2_pd_varnames

    def get_schema(self, request=None, *args, **kwargs):
        """Return the schema tree describing every PD diurnal variable."""
        res = {
            'arch': 'subset',
            'name': 'PD',
            'type': '',
            'subs': []
        }
        # Fix: this used to iterate merra2_pm_varnames (copy/paste from
        # the PM subset); the PD schema must advertise its own variables.
        for vn in self.varnames:
            levels = merra2_levels_d.get(vn, [])
            level_default = merra2_levels_default.get(vn, [levels[0]])
            res['subs'].append({
                'arch': 'variable',
                'name': vn,
                'vartype': 'T',
                'datatype': 'diurnalstat',
                'formats': ['json', 'hc_figure'],
                "coords": [{
                    "name": "time",
                    "values": self.years,
                    "default": [self.default_year],
                    "can_poly": True
                }, {
                    "name": "level",
                    "values": levels,
                    "default": level_default,
                    "can_poly": True
                }, {
                    "name": "jy_ix",
                    # Placeholders substituted later by the template layer.
                    "values": "<%= jy_ix_values %>",
                    "default": "<%= jy_ix_default %>"
                }]
            })
        return res

    @cache_data
    @pack_data
    @merra2_point_grid_geometry
    def get_data(self, uri, request=None, *args, **kwargs):
        """Return hourly (diurnal) statistics for one grid point.

        Keyword args:
            datatype: kept for API symmetry with the sibling subsets.
            uri_info: pre-parsed URI dict; parsed from *uri* when absent.
            utc_offset: fallback timezone offset in hours when the URI
                carries none (default 8, i.e. Beijing time).

        Raises:
            GetDataError: wrapping any unexpected failure.
        """
        datatype = kwargs.get('datatype', 'dataunit')
        uri_info = kwargs.get('uri_info', None)

        if uri_info is None:
            uri_info = parse_merra2_uri(uri)

        if uri_info['timezone'] != 0:
            utc_offset = uri_info['timezone']
        else:
            utc_offset = int(kwargs.get('utc_offset', 8))

        try:
            subset_uri = uri_info['subset']
            s3uri = 'merra2/%s/%s/%s/%s/ALL.nc' % (subset_uri,
                                                   'stat',
                                                   uri_info['time'],
                                                   uri_info['level'])
            fc = raw_large_fc_manager.get(s3uri, when_not_exist=['fetch'])
            fname = fc.filepath
            jy, ix = uri_info['jy_ix'].split('_')
            v_name = uri_info['varname']
            with Dataset(fname) as ds:
                data = {
                    "count": ds.variables['count'][:],
                    "units": merra2_units_d.get(v_name, ''),
                }
                # Collect mean/std/min/max; a missing NetCDF variable
                # yields None for that statistic.
                for final_name, postfix in [
                    ('values', ''),
                    ('std', '_std'),
                    ('min', '_min'),
                    ('max', '_max'),
                ]:
                    current_vn = v_name + postfix
                    if current_vn in ds.variables:
                        v = ds.variables[current_vn]
                        values = v[:, jy, ix]
                        # Storage units -> display units; std is offset
                        # invariant, so no Kelvin shift for it.
                        if v_name == 't':
                            if final_name not in ['std']:
                                values -= 273.15
                        elif v_name == 'qv':
                            values *= 1000.0
                        elif v_name in ('slp', 'psfc'):
                            values /= 100.0
                    else:
                        values = None
                    data[final_name] = values

            res = {
                "type": "dataunit",
                "uri": uri,
                "info": {  # descriptive info
                    "varname": v_name,
                    "zh_varname": merra2_zh_varname_d.get(v_name, v_name),
                    "type": "diurnalmean",
                },
                "data": data
            }
            if utc_offset != 0:
                # Rotate every per-UTC-hour array into local time.
                # Fixes two defects: 'min'/'max' were previously left
                # unrotated (misaligned with 'values'), and a missing
                # statistic (None) crashed deque().
                for sub in ('values', 'std', 'min', 'max', 'count'):
                    arr = res['data'][sub]
                    if arr is None:
                        continue
                    deq = deque(arr)
                    deq.rotate(utc_offset)
                    res['data'][sub] = list(deq)

        except GetDataError as e:
            raise e
        except Exception as e:
            raise GetDataError(unicode(e))

        return res

    @support_packunitjson
    def get_file(self, uri, dest, request=None, *args, **kwargs):
        """File export; the actual work is done by the decorator."""
        pass


class FG_Merra2_RY_Dataset(FG_Merra2_DatasetBase):
    """MERRA-2 rectangular yearly statistics (RY) subset.

    Serves a 2-D (lat, lon) field of yearly statistics cut out of the
    global stat file; here 'jy_ix' carries slice ranges in the form
    '<jy_min>:<jy_max>_<ix_min>:<ix_max>' instead of a single point.
    """
    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_Merra2_RY_Dataset, self).__init__(name, uri, info, *args, **kwargs)
        self.varnames = merra2_ry_varnames

    def get_schema(self, request=None, *args, **kwargs):
        """Return the schema tree describing every RY rect-field variable."""
        res = {
            'arch': 'subset',
            'name': 'RY',
            'type': '',
            'subs': []
        }
        for vn in self.varnames:
            levels = merra2_levels_d.get(vn, [])
            level_default = merra2_levels_default.get(vn, [levels[0]])
            res['subs'].append({
                'arch': 'variable',
                'name': vn,
                'vartype': 'T',
                'datatype': 'yearlystat',
                'formats': ['csv', 'nc', 'json', 'rect_figure', 'kmz'],
                'zipped': False,
                "coords": [{
                    "name": "time",
                    "values": self.years,
                    "default": [self.default_year]
                }, {
                    "name": "level",
                    "values": levels,
                    "default": level_default,
                    "zipped": False
                }, {
                    "name": "jy_ix",
                    # Placeholders substituted later by the template layer.
                    "values": "<%= jy_ix_values %>",
                    "default": "<%= jy_ix_default %>",
                }]
            })
        return res

    @cache_data(valid_params={'dataunit': ['datatype', 'user']})
    @provide_figinfo(suggest_cmap_dict=merra2_suggest_cmap_d)
    def get_data(self, uri, request=None, *args, **kwargs):
        """Return the rectangular yearly-stat field addressed by *uri*.

        The requested window may cross the dateline (ix_max beyond the
        last longitude column), in which case two slices are stitched
        together and the wrapped longitudes are shifted by +360.

        Raises:
            GetDataError: wrapping any unexpected failure.
        """
        datatype = kwargs.get('datatype', 'dataunit')
        uri_info = kwargs.get('uri_info', None)
        sample = kwargs.get('sample', False)

        if uri_info is None:
            uri_info = parse_merra2_uri(uri)

        try:
            varname = uri_info['varname']
            jy_ix = uri_info['jy_ix']
            subset_uri = 'PY'
            s3uri = 'merra2/%s/%s/%s/%s/ALL.nc' % (
                subset_uri,
                'stat',
                uri_info['time'],
                uri_info['level']
            )
            fc = raw_medium_fc_manager.get(s3uri, when_not_exist=['fetch'])
            fname = fc.filepath

            # jy1, jy2, ix1, ix2
            with Dataset(fname, 'r') as ncf:
                v_name = uri_info['varname']
                v = ncf.variables[v_name]
                # jy_ix format: '<jy_min>:<jy_max>_<ix_min>:<ix_max>'
                jys, ixs = jy_ix.split('_')
                jy_min, jy_max = jys.split(':')
                ix_min, ix_max = ixs.split(':')
                # Total number of longitude columns in the global grid.
                limit = ncf.variables['lon'][:].shape[0]
                jy1 = int(jy_min); jy2 = int(jy_max)+1
                lat = ncf.variables['lat'][jy1:jy2]
                # NOTE(review): the ix_max == limit edge case takes the
                # non-wrap branch — confirm upstream never produces it.
                if int(ix_max) > limit:
                    # Dateline wrap: take [ix1:end] plus [0:overflow] and
                    # shift the wrapped longitudes by +360 so lon stays
                    # monotonically increasing.
                    ix1 = int(ix_min); ix2 = None
                    ix3 = 0; ix4 = int(ix_max) - limit + 1
                    data1 = v[0, jy1:jy2, ix1:ix2]
                    data2 = v[0, jy1:jy2, ix3:ix4]
                    data = np.concatenate((data1, data2), axis=1)
                    lon1 = ncf.variables['lon'][ix1:ix2]
                    lon2 = ncf.variables['lon'][ix3:ix4] + 360
                    lon = np.concatenate((lon1, lon2), axis=0)
                else:
                    ix1 = int(ix_min); ix2 = int(ix_max)+1
                    data = v[0, jy1:jy2, ix1:ix2]
                    lon = ncf.variables['lon'][ix1:ix2]
            # Storage units -> display units.
            if v_name in ('t', 't_max', 't_min'):
                data -= 273.15
            if v_name == 'qv':
                data *= 1000
            if v_name in ('slp', 'psfc'):
                data /= 100
            res = {
                "uri": uri,
                "type": "dataunit",
                "info": {
                    "varname": varname,
                    "type": "rect_yearlystat",
                    "sample": sample
                },
                "coords": {
                    "dataset": uri_info['dataset'],
                    "subset": uri_info['subset'],
                    "varname": uri_info['varname'],
                    "time": uri_info['time'],
                    "jy_ix": uri_info['jy_ix']
                },
                "data": {
                    "lat": lat,
                    "lon": lon,
                    "values": data,
                    "suggest_range": merra2_suggest_range_d.get(varname, (None, None)),
                    "units": merra2_units_d.get(varname, ''),
                }
            }
        except GetDataError as e:
            raise e
        except Exception as e:
            raise GetDataError(unicode(e))

        return res

    def get_file(self, uri, dest, request=None, *args, **kwargs):
        """Delegate file export to RY_get_file.

        NOTE(review): RY_get_file is presumably provided by a base or
        mixin class — it is not defined in this file; verify.
        """
        return self.RY_get_file(uri, dest, request=request, *args, **kwargs)


class FG_Merra2_Dataset(FG_Merra2_DatasetBase):
    """Top-level MERRA2 dataset.

    Owns one sub-dataset per subset URI (point yearly/monthly/diurnal
    stats, point timeseries, rect yearly stat) and dispatches schema,
    lookup, data and file requests to them.
    """

    mold_name = 'Merra2'
    # Order here defines the order of "subs" entries in get_schema().
    subset_classes = [
        {'name': 'PointYearlyStat', 'uri': 'PY', 'class': FG_Merra2_PY_Dataset},
        {'name': 'PointMonthlyStat', 'uri': 'PM', 'class': FG_Merra2_PM_Dataset},
        {'name': 'PointDiurnalStat', 'uri': 'PD', 'class': FG_Merra2_PD_Dataset},
        {'name': 'PointTimeseries', 'uri': 'PT', 'class': FG_Merra2_PT_Dataset},
        {'name': 'RectYearlyStat', 'uri': 'RY', 'class': FG_Merra2_RY_Dataset}
    ]
    tags = ['wind', 'merra', 'meteorology']

    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_Merra2_Dataset, self).__init__(name, uri, info, *args, **kwargs)
        # Human-readable description; falls back to the Chinese display
        # name, then to the dataset name itself.
        self.desc = info.get('desc', info.get('zh_name', name))

    def get_schema(self, request=None, *args, **kwargs):
        """Return the DSV schema dict for this dataset.

        Includes one sub-schema per entry of ``subset_classes``, in
        declaration order.
        """
        res = {
            "arch": "dataset",
            "mold": self.mold_name,
            "type": "DSV",
            "name": self.name,
            "zh_name": self.zh_name,
            "desc": self.desc,
            "common": {},
            "subset_dict": merra2_zh_subset_d,
            "varname_dict": merra2_zh_varname_d,
            "subs": []
        }
        for class_info in self.subset_classes:
            # NOTE(review): the caller's `request` is not forwarded here
            # (subs always receive request=None) -- confirm this is
            # intended and not a dropped argument.
            sub_schema = self.uri_subsets[class_info["uri"]].get_schema(request=None, *args, **kwargs)
            res["subs"].append(sub_schema)
        return res

    def lookup(self, info, *args, **kwargs):
        """Find which subsets can serve the location described by *info*.

        *info* is a dict with ``type`` == ``'point'`` (keys ``lon``/``lat``)
        or ``'rect'`` (keys ``lon1``/``lat1``/``lon2``/``lat2``).

        Returns a list with at most one result dict; an empty list when a
        point falls outside every configured domain.  Any failure is
        re-raised as LookupError.
        """
        try:
            results = []
            if info['type'] == 'point':
                lon = float(info['lon'])
                lat = float(info['lat'])
                valid_points = self.domainer.lookup_lonlat(lon=lon, lat=lat,
                                                           int_index=True, trim=True, level_unique=True)
                if len(valid_points) == 0:
                    return results
                merra_ll = latlon_to_merra2_jyix(lat, lon)
                res = {
                    "dataset": self.name,
                    "subsets": ["PT", "PY", "PM", "PD"],
                    "versions": [],
                    "version_replace_dict": {},
                    "jy_ix_values": [merra_ll],
                    "jy_ix_default": [merra_ll]
                }
                # Every matched domain contributes its resolution as a
                # version; all versions map back to the same MERRA2 grid
                # cell (merra_ll).
                for point_info in valid_points:
                    resolution = point_info['resolution']
                    res['versions'].append(resolution)
                    res['version_replace_dict'][resolution] = merra_ll
                    # Overwritten each iteration, so the LAST resolution
                    # wins -- presumably valid_points is ordered so the
                    # final one is the preferred default; confirm.
                    res['version_default'] = [resolution]
                results.append(res)

            elif info['type'] == 'rect':
                rect_info = get_merra2_rect(
                    lon1=float(info['lon1']),
                    lat1=float(info['lat1']),
                    lon2=float(info['lon2']),
                    lat2=float(info['lat2']),
                )
                res = {
                    "dataset": self.name,
                    "subsets": ['RY'],
                    "jy_ix_values": [rect_info['jy_ix']],
                    "jy_ix_default": [rect_info['jy_ix']],
                }
                results.append(res)

            return results
        except Exception as e:
            # NOTE(review): LookupError is a Python builtin; assumed to be
            # shadowed by a project exception via the star imports -- verify.
            raise LookupError(u'[%s] %s' % (type(e), unicode(e)))

    def get_data(self, uri, *args, **kwargs):
        """Dispatch a data request to the subset named inside *uri*.

        Returns None for an unknown subset; wraps unexpected errors in
        GetDataError.  A pre-parsed ``uri_info`` may be supplied via kwargs
        to skip re-parsing.
        """
        try:
            if 'uri_info' in kwargs:
                uri_info = kwargs['uri_info']
            else:
                uri_info = parse_merra2_uri(uri)
            subset = self.uri_subsets.get(uri_info['subset'], None)
            if subset is None:
                return None

            return subset.get_data(uri, *args, **kwargs)
        except GetDataError:
            # Already the right type; bare raise preserves the original
            # traceback (``raise e`` would reset it under Python 2).
            raise
        except Exception as e:
            raise GetDataError(u'[%s] %s' % (type(e), unicode(e)))

    @protect_hack
    def get_file(self, uri, dest, request=None, *args, **kwargs):
        """Download the file behind *uri* into *dest* via the matching subset.

        Any failure -- including an unknown subset -- is re-raised as
        GetFileError.
        """
        try:
            uri_info = parse_merra2_uri(uri)
            subset = self.uri_subsets.get(uri_info['subset'], None)
            if subset is None:
                raise ValueError("No such subset in Merra2: %s" % uri_info['subset'])

            getres = subset.get_file(uri, dest, request=request, uri_info=uri_info, *args, **kwargs)
            return getres
        except Exception as e:
            raise GetFileError(u'[%s] %s' % (type(e), unicode(e)))

    def is_free(self, uri=None, *args, **kwargs):
        """MERRA2 data is never served free of charge."""
        return False
