# -*- coding:utf-8 -*-

from datasets.utils.eq_hour import *
from datasets.utils.fc_managers import *
from datasets.utils.fg_dataset_mold_base import *
from metlib.wrf.wps import *
from metlib.datetime import T, TD, chop_by_year
from metlib.kits import *
import numpy as np
import pandas as pd
from netCDF4 import Dataset
from .fg_wrf_common import *
from pydx.wind.stat import wind_stater


class FG_WRF_DatasetBase(FG_DatasetMoldBase):
    """Common base for the FG WRF dataset subsets (PT / PY / PM / PD / RY).

    Reads the dataset configuration dict ``info`` and exposes it as
    attributes (time span, available years, vertical levels, per-subset
    variable-name lists and storage formats), and builds a ``Domainer``
    from the WPS namelist for grid <-> lon/lat conversions.
    """

    # URI layout and parser shared by every FG WRF subset.
    uri_fields = ['dataset', 'subset', 'varname', 'time', 'level', 'grid']
    zipped_fields = set()
    uri_parser = parse_fgwrf_uri
    allow_RY_download_json = False
    # Chinese display names for subsets / variables (from fg_wrf_common).
    zh_subset_d = fgwrf_zh_subset_d
    zh_varname_d = fgwrf_zh_varname_d

    def __init__(self, name, uri, info, *args, **kwargs):
        """Initialize from the dataset configuration.

        ``info`` must contain at least 'wps_namelist', 'begdt', 'enddt',
        'years', 'levels' and 'name'; every other key falls back to a
        default.  ``name``/``uri``/``info`` are forwarded unchanged to
        ``FG_DatasetMoldBase``.
        """
        super(FG_WRF_DatasetBase, self).__init__(name, uri, info, *args, **kwargs)
        extra_info = {}
        if 'domain_info' in info:
            extra_info['domain_info'] = info['domain_info']
        self.domainer = Domainer(info['wps_namelist'], extra_info=extra_info)
        self.begdt = T(info['begdt'])
        self.enddt = T(info['enddt'])
        self.years = info['years']
        self.levels = info['levels']
        self.zh_name = info.get('zh_name', info['name'])
        self.root_path = kwargs.get('root_path', '')
        # Name used when composing storage (S3-style) URIs; defaults to the
        # dataset's own name.
        self.dataset_storage_name = info.get('dataset_storage_name', info['name'])
        self.version = info.get('version', 1)
        # Variables that exist only at the surface ('sfc') level.
        self.sfc_vars = info.get('sfc_vars', ['rain', 'snow', 'swdown', 'glw', 'ts', 'slp'])
        # Per-subset variable lists (PY=yearly, PM=monthly, PD=daily,
        # PT=point timeseries, RY=raw yearly).
        self.py_varnames = info.get('py_varnames', ['summary', 'eqhour', 'windrose', 'wpdrose', 'dist'])
        self.pm_varnames = info.get('pm_varnames', ['wspd', 'wpd', 'psfc', 'td', 'rhoair', 'rh'])
        self.pd_varnames = info.get('pd_varnames', ['wspd', 'wpd', 'psfc', 'td', 'rhoair', 'rh'])
        self.pt_varnames = info.get('pt_varnames', ['wspd', 'wdir', 'wpd', 'psfc', 'td', 'rhoair', 'rh'])
        self.ry_varnames = info.get('ry_varnames', ['wspd', 'wpd', 'psfc', 'td', 'rhoair', 'rh'])
        self.py_summary_sub_varnames = info.get('py_summary_sub_varnames',
                                                ['wspd', 'wpd', 'td', 'psfc', 'rhoair', 'rh'])
        self.py_sfc_summary_sub_varnames = info.get('py_sfc_summary_sub_varnames',
                                                    ['rain', 'snow', 'swdown', 'glw', 'ts', 'slp'])
        # Storage formats for the various products; 'json' means one small
        # file per grid point, 'nc'/'h5' mean one big file per domain.
        self.py_stat_format = info.get('py_stat_format', 'json')
        self.pm_stat_format = info.get('pm_stat_format', 'json')
        self.pd_stat_format = info.get('pd_stat_format', 'json')
        self.py_rose_format = info.get('py_rose_format', 'json')
        self.py_wpdrose_format = info.get('py_wpdrose_format', self.py_rose_format)
        self.py_dist_format = info.get('py_dist_format', 'nc')
        self.pt_format = info.get('pt_format', 'json')

    def get_domain_resolution_dict(self):
        """Return a mapping of domain -> resolution.

        NOTE(review): the original body created ``res`` and fell through
        without returning it (implicitly returning None).  Returning the
        dict looks like the intent; the base implementation yields an
        empty mapping and subclasses are expected to populate/override
        it -- confirm against callers.
        """
        res = {}
        return res


class FG_WRF_PT_Dataset(FG_WRF_DatasetBase):
    """PT subset: hourly point (single grid cell) timeseries data.

    Data is stored either as one JSON file per (variable, year, level,
    grid point) or as one HDF5 file per (year, grid point) holding all
    variables/levels, selected by ``self.pt_format``.
    """

    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_WRF_PT_Dataset, self).__init__(name, uri, info, *args, **kwargs)
        # Full list of PT variable names (defined in fg_wrf_common).
        self.varnames = fgwrf_pt_varnames

    @property
    def schema(self):
        """Describe the PT subset for the frontend: one 'variable' entry
        per PT varname, with time/level/version coordinate choices.

        '<%= ... %>' placeholders are template markers substituted later.
        """
        res = {
            'arch': 'subset',
            'name': 'PT',
            'type': '',
            'subs': [],
        }
        for vn in self.pt_varnames:
            levels = self.levels
            level_default = ['70']
            if self.version >= 2:
                # v2+ datasets expose extra "combined" pseudo-levels whose
                # data lives under a different true variable name.
                if vn in fgwrf_combine_varname_d:
                    levels = self.levels + fgwrf_combine_varname_d[vn].keys()
            if vn in self.sfc_vars:
                # Surface-only variables have exactly one level.
                levels = ['sfc']
                level_default = ['sfc']
            if level_default[0] not in levels:
                level_default = [levels[0]]

            res['subs'].append({
                'arch': 'variable',
                'name': vn,
                'vartype': 'VLT',
                'datatype': 'timeseries',
                'formats': ['json', 'csv', 'hc_figure'],
                "coords": [{
                    "name": "time",
                    "values": self.years,
                    "default": [self.years[-1]]
                }, {
                    "name": "level",
                    "values": levels,
                    "default": level_default,
                    "zipped": False,
                    "can_poly": True,
                }, {
                    "name": "version",
                    "values": '<%= versions %>',
                    "replace_name": "grid",
                    "replace_dict": '<%= version_replace_dict %>',
                    "default": "<%= version_default %>"
                }]
            })
        return res

    @cache_data
    def get_data(self, uri, request=None, *args, **kwargs):
        """Fetch a point timeseries for ``uri``.

        Returns a pandas DataFrame (``datatype='dataframe'``), a datapack
        dict (default), or a single dataunit dict; returns ``{}`` when no
        yearly piece could be loaded.  Raises GetDataError on any other
        failure.
        """
        datatype = kwargs.get('datatype', 'datapack')
        uri_info = kwargs.get('uri_info', None)
        if uri_info is None:
            uri_info = parse_fgwrf_uri(uri)

        try:
            # TODO: this dataset is stored in UTC+8, so the requested
            # timezone has to be applied in reverse here.
            try:
                if uri_info['timezone'] != 0:
                    utc_offset = uri_info['timezone']
                else:
                    utc_offset = int(kwargs.get('utc_offset', 8))
                # Offset relative to the stored UTC+8 timestamps.
                toffset = TD('%sh' % (utc_offset - 8))
            except ValueError as e:
                raise BadDataParameter(unicode(e))

            if utc_offset != 8:
                # Shift the requested window into storage (UTC+8) time.
                uri_info['begdt'] -= toffset
                uri_info['enddt'] -= toffset

            varname = uri_info['varname']
            if (uri_info['level'] == '*'):
                levels = self.levels
            else:
                levels = uri_info['level'].split(',')

            results = []
            if self.pt_format == "json":
                # One JSON file per (variable, year, level, point); the
                # requested window is split by calendar year.
                splits = chop_by_year(uri_info['begdt'], uri_info['enddt'])
                for year, beg, end in splits:
                    try:
                        # Hourly index covering the whole year; the stored
                        # values are assumed to align with it.
                        dts = TR(T(year + '0101'), T(year + '0101') + TD('1Y'), '1h')
                        data_dict = {}
                        for level in levels:
                            # "Combined" pseudo-levels map to a different
                            # true variable name in storage.
                            combine_vars = fgwrf_combine_varname_d.get(varname, {})
                            true_varname = combine_vars.get(level, varname)

                            s3uri = '%s/%s/%s/%s/%s/%s/%s.json' % (
                                self.dataset_storage_name,
                                uri_info['subset'],
                                true_varname,
                                year,
                                uri_info['domain'],
                                level,
                                uri_info['jy_ix']
                            )
                            fc = raw_tiny_fc_manager.get(s3uri, when_not_exist=['fetch'])
                            fname = fc.filepath
                            part_res = json.load(open(fname))
                            values = part_res['contents']['data']['data']['values']
                            data_dict['%s_%s' % (varname, level)] = values
                        part_df = pd.DataFrame(data_dict, index=dts)
                        part_df.index.name = 'datetime'
                        results.append(part_df)
                    except Exception as e:
                        # Best-effort: a missing/broken year is skipped
                        # silently and the remaining years are still used.
                        pass
            elif self.pt_format == 'h5':
                # One HDF5 store per (year, point) holding all variables
                # and levels; select only the needed columns/rows.
                for tlabel in self.years:
                    try:
                        s3uri = '%s/%s/ALL/%s/%s/ALL/%s.h5' % (
                            self.dataset_storage_name,
                            uri_info['subset'],
                            tlabel,
                            uri_info['domain'],
                            uri_info['jy_ix']
                        )
                        fc = raw_small_fc_manager.get(s3uri, when_not_exist=['fetch'])
                        fname = fc.filepath
                        colnames = []
                        combine_vars = fgwrf_combine_varname_d.get(varname, {})
                        # Columns stored under the true (combined) name are
                        # renamed back to the requested varname afterwards.
                        translate_colnames = {}
                        for level in levels:
                            true_varname = combine_vars.get(level, varname)
                            colnames.append('%s_%s' % (true_varname, level))
                            if true_varname != varname:
                                translate_colnames['%s_%s' % (true_varname, level)] = '%s_%s' % (varname, level)
                        cond = "index >= '{:%Y-%m-%d %H:%M:%S}' & index < '{:%Y-%m-%d %H:%M:%S}' & columns = {}".format(
                            uri_info['begdt'], uri_info['enddt'], colnames)

                        h5f = pd.HDFStore(fname)
                        df = h5f.select('df', cond)
                        if translate_colnames:
                            df.rename(columns=translate_colnames, inplace=True)

                        if len(df) > 0:
                            results.append(df)
                        h5f.close()
                        # NOTE(review): if select() raises, h5f is never
                        # closed -- the broad except below swallows it.
                    except Exception as e:
                        # Best-effort: skip unavailable years silently.
                        pass

            if len(results) == 0:
                return {}

            semifinal_df = pd.concat(results, axis=0)
            semifinal_df.sort_index(inplace=True)  # sort
            semifinal_df = semifinal_df.groupby(level=0).first()  # unique

            # interval_df = pd.DataFrame(index=TR(uri_info['begdt'], uri_info['enddt'], uri_info['tdelta']))
            # final_df = pd.concat([semifinal_df, interval_df], axis=1, join='inner')
            # TODO: limit beg, end
            final_df = semifinal_df

            # Convert the storage-time (UTC+8) index to the requested zone.
            beijing_dts = final_df.index.to_pydatetime()
            dts = beijing_dts + toffset
            final_df.index = dts
            final_df.index.name = 'datetime'

            # Per-level access control: users without full permission get a
            # degraded "sample" (24h rolling mean) for 8 of the 12 months;
            # 'NoAccess' blanks the column entirely.
            final_df['month'] = final_df.index.month
            w = final_df.month.isin([2, 3, 5, 6, 8, 9, 11, 12])
            perm_dict = {}
            suburi_dict = {}
            for level in levels:
                vl = '%s_%s' % (varname, level)
                suburi = '%s/%s/%s/%s/%s/%s' % (
                    uri_info['dataset'],
                    uri_info['subset'],
                    uri_info['varname'],
                    uri_info['year'],
                    level,
                    uri_info['grid'],
                )
                suburi_dict[vl] = suburi
                # NOTE(review): request defaults to None, in which case
                # request.user raises AttributeError here and the outer
                # handler turns it into GetDataError -- confirm callers
                # always pass a request.
                has_perm = user_owns_data(request.user, suburi)
                perm_dict[vl] = has_perm
                if has_perm == 'NoAccess':
                    final_df[vl] = np.nan
                else:
                    sample = not has_perm
                    if sample:
                        # pd.rolling_mean is the pre-0.18 pandas API.
                        final_df['%s_sample' % vl] = pd.rolling_mean(final_df[vl], 24)
                        final_df[vl][w] = final_df['%s_sample' % vl][w]
                        final_df.drop(['%s_sample' % vl], axis=1, inplace=True)
            final_df.drop(['month'], axis=1, inplace=True)

            if datatype == 'dataframe':
                res = final_df
            else:
                # Wrap each level's series into a dataunit inside a datapack.
                tags = {}
                contents = {}
                for level in levels:
                    vl = '%s_%s' % (varname, level)
                    suburi = suburi_dict[vl]
                    has_perm = perm_dict[vl]
                    if has_perm == 'NoAccess':
                        contents[suburi] = get_no_access_dataunit(suburi)
                    else:
                        subdata = final_df[vl].values
                        tags[suburi] = suburi
                        contents[suburi] = {
                            "type": "dataunit",
                            "uri": suburi,
                            "info": {
                                "varname": varname,
                                "type": "timeseries",
                                "zh_varname": fgwrf_zh_varname_d.get(varname, varname),
                                "sample": not has_perm,
                            },
                            "coords": {
                                "varname": varname,
                                "level": level,
                            },
                            "data": {
                                "values": subdata,
                                "dts": dts,
                                "begdt": dts[0],
                                "interval": 3600,
                                "utc_offset": utc_offset,
                                "suggest_range": fgwrf_pt_suggest_range_d.get(varname, (None, None)),
                                "units": fgwrf_pt_units_d.get(varname, ''),
                            }
                        }

                # TODO: tdelta handling
                final_pack = {
                    "type": "datapack",
                    "uri": uri,
                    "tags": tags,
                    "contents": contents
                }

                if datatype == 'datapack':
                    res = final_pack
                else:
                    # Python 2 only: dict.values() is a list here.
                    res = final_pack['contents'].values()[0]
        except Exception as e:
            raise GetDataError(unicode(e))

        return res

    @support_csv
    @support_packunitjson
    def get_file(self, uri, dest, request=None, *args, **kwargs):
        """File export; actual work is done by the decorators
        (csv / packed-unit JSON) around this no-op body."""
        pass


class FG_WRF_PY_Dataset(FG_WRF_DatasetBase):
    """PY subset: per-grid-point yearly statistics (summary tables, wind /
    wind-power-density roses, wind distributions, turbine equivalent hours).

    'dist' and 'eqhour' are computed/read specially; all other variables
    come from precomputed JSON packs or NetCDF stat files depending on
    ``self.py_stat_format``.
    """

    zipped_fields = {'varname', 'level'}

    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_WRF_PY_Dataset, self).__init__(name, uri, info, *args, **kwargs)
        self.varnames = self.py_varnames
        self.mean_varnames = self.py_summary_sub_varnames
        # Value domains used when expanding zipped uri fields.
        self.uri_field_values = {
            'varname': self.varnames,
            'level': self.levels
        }

    @property
    def schema(self):
        """Describe the PY subset for the frontend: one 'variable' entry
        per PY varname with time/level/version coordinates.

        'eqhour' gets a fixed pseudo-level ('全部' -> 'all'); '<%= ... %>'
        placeholders are template markers substituted later.
        """
        res = {
            'arch': 'subset',
            'name': 'PY',
            'type': '',
            'subs': [],
        }
        # JSON storage packs all levels into one file, so the level coord
        # can be requested zipped.
        if self.py_stat_format == "json":
            coord_zipped = True
        else:
            coord_zipped = False

        for vn in self.py_varnames:
            time_can_poly = False
            level_can_poly = False
            if vn in ('summary', 'sfc_summary', 'eqhour', 'dist'):
                time_can_poly = True
            if vn in ('summary', 'dist'):
                level_can_poly = True

            if vn in ('summary', 'sfc_summary', 'eqhour'):
                formats = ['json']
            elif vn in ('windrose', 'rose'):
                formats = ['json', 'wws', 'hc_figure']
            else:
                formats = ['json', 'hc_figure']

            variable = {
                'arch': 'variable',
                'name': vn,
                'vartype': 'VLT',
                'datatype': 'yearlystat',
                'formats': formats,
                'zipped': False
            }

            if vn == 'eqhour':
                variable['coords'] = [{
                    "name": "time",
                    "values": self.years,
                    "default": [self.years[-1]],
                    "can_poly": time_can_poly,
                }, {
                    "name": "level",
                    "values": [u'全部'],
                    "default": [u'全部'],
                    "replace_name": "level",
                    "replace_dict": {u"全部": 'all'},
                    "can_poly": level_can_poly,
                }, {
                    "name": "version",
                    "values": '<%= versions %>',
                    "replace_name": "grid",
                    "replace_dict": '<%= version_replace_dict %>',
                    "default": "<%= version_default %>",
                }]
            else:
                levels = self.levels
                level_default = ['70']
                if vn == 'sfc_summary':
                    levels = ['sfc']
                    level_default = ['sfc']
                if level_default[0] not in levels:
                    level_default = [levels[0]]

                variable["coords"] = [{
                    "name": "time",
                    "values": self.years,
                    "default": [self.years[-1]],
                    "can_poly": time_can_poly,
                }, {
                    "name": "level",
                    "values": levels,
                    "zipped": coord_zipped,
                    "default": level_default,
                    "can_poly": level_can_poly,
                }, {
                    "name": "version",
                    "values": '<%= versions %>',
                    "replace_name": "grid",
                    "replace_dict": '<%= version_replace_dict %>',
                    "default": "<%= version_default %>",
                }]

            res['subs'].append(variable)

        return res

    @cache_data
    @pack_data
    @perm_data
    def get_data(self, uri, request=None, *args, **kwargs):
        """Fetch one yearly-stat dataunit for ``uri``.

        Dispatches on the parsed varname: 'dist' reads a NetCDF wind
        distribution, 'eqhour' derives turbine equivalent hours from PT
        timeseries, everything else is read from the stored JSON pack or
        NetCDF stats.  Raises GetDataError on failure.
        """
        datatype = kwargs.get('datatype', 'dataunit')
        sample = kwargs.get('sample', False)
        uri_info = kwargs.get('uri_info', None)
        if uri_info is None:
            uri_info = parse_fgwrf_uri(uri)

        try:
            # Distribution data (special handling)
            # TODO: no sample (degraded-data) support here
            unit_uri_info = self.parse_uri(uri)
            if unit_uri_info['varname'] == 'dist':
                # v1 stored distributions under the RY subset, v2+ under PY.
                if self.version >= 2:
                    subset = 'PY'
                else:
                    subset = 'RY'
                s3uri = '%s/%s/%s/%s/%s/%s/ALL.nc' % (
                    self.dataset_storage_name,
                    subset,
                    'dist',  # uri_info['varname'],
                    uri_info['time'],
                    uri_info['domain'],
                    uri_info['level'],
                )
                fc = raw_medium_fc_manager.get(s3uri, when_not_exist=['fetch'])
                fname = fc.filepath

                # Grid point index encoded as "<jy>_<ix>".
                jy_ix = uri_info['jy_ix']
                jy, ix = jy_ix.split('_')
                jy = int(jy)
                ix = int(ix)

                with Dataset(fname, 'r') as ncf:
                    # Wind speed / direction histograms plus fitted Weibull
                    # a (scale) and k (shape) parameters for this cell.
                    wsdist = ncf.variables['wsdist'][0, jy, ix]
                    wddist = ncf.variables['wddist'][0, jy, ix]
                    wdcdist = ncf.variables['wdcdist'][0, jy, ix]
                    wsbin = ncf.variables['wsbin'][:]
                    wdbin = ncf.variables['wdbin'][:]
                    count = ncf.variables['count'][0]
                    a = ncf.variables['a'][0, jy, ix]
                    k = ncf.variables['k'][0, jy, ix]

                res = {
                    "type": "dataunit",
                    "uri": uri,
                    "info": {
                        "varname": 'dist',
                        "zh_varname": fgwrf_zh_varname_d['dist'],
                        "type": 'winddist',
                        "sample": sample
                    },
                    "coords": {
                        "dataset": uri_info['dataset'],
                        "subset": uri_info['subset'],
                        "varname": unit_uri_info['varname'],
                        "time": uri_info['year'],
                        "level": uri_info['level'],
                        "grid": uri_info['grid']
                    },
                    "data": {
                        "wsbinnum": len(wsbin),
                        "wdbinnum": len(wdbin),
                        "wsbins": wsbin,
                        "wdbins": wdbin,
                        "calm_thres": 0.3,
                        "wsdist": wsdist,
                        "wddist": wddist,
                        "wdcdist": wdcdist,
                        "count": count,
                        "weibull_a": a,
                        "weibull_k": k,
                        "units": fgwrf_py_units_d.get('dist', ''),
                    }
                }

                return res
            # Equivalent hours (special handling)
            elif unit_uri_info['varname'] == 'eqhour':
                # Build PT uris for the wspd/rhoair inputs at the needed
                # heights; %s slots are (varname, level).
                uri_tmpl = unit_uri_info['dataset'] + '/PT/%s/' + \
                           unit_uri_info['time'] + \
                           '/%s/' + unit_uri_info['grid']

                pt_subset = self._parent.uri_subsets['PT']

                wspd_70_uri = uri_tmpl % ('wspd', '70')
                wspd_80_uri = uri_tmpl % ('wspd', '80')
                wspd_100_uri = uri_tmpl % ('wspd', '100')
                rhoair_70_uri = uri_tmpl % ('rhoair', '70')
                rhoair_80_uri = uri_tmpl % ('rhoair', '80')

                modified_kwargs = kwargs.copy()
                modified_kwargs.update({
                    'datatype': 'dataframe',
                    'request': request,
                })
                wspd_70_df = pt_subset.get_data(wspd_70_uri, **modified_kwargs)
                wspd_80_df = pt_subset.get_data(wspd_80_uri, **modified_kwargs)
                wspd_100_df = pt_subset.get_data(wspd_100_uri, **modified_kwargs)
                rhoair_70_df = pt_subset.get_data(rhoair_70_uri, **modified_kwargs)
                rhoair_80_df = pt_subset.get_data(rhoair_80_uri, **modified_kwargs)

                # Interpolate hub heights not stored directly (65m, 85m)
                # from the 70/100m profile.
                wspd_70 = wspd_70_df['wspd_70'].values
                wspd_80 = wspd_80_df['wspd_80'].values
                wspd_100 = wspd_100_df['wspd_100'].values
                wspd_65 = interp_wspd(wspd1=wspd_70, wspd2=wspd_100,
                                      h1=70.0, h2=100.0, target_h=65.0, base_h=70.0,
                                      base_wspd=wspd_70)
                wspd_85 = interp_wspd(wspd1=wspd_70, wspd2=wspd_100,
                                      h1=70.0, h2=100.0, target_h=85.0, base_h=80.0,
                                      base_wspd=wspd_80)
                rhoair_70 = rhoair_70_df['rhoair_70'].values
                rhoair_80 = rhoair_80_df['rhoair_80'].values

                res_data = {}
                for tb_model in fgwrf_turbine_models:
                    para = power_curves.get(tb_model)
                    # NOTE(review): no 'else' branch -- a turbine whose
                    # height is not 65/78/80/85 silently reuses the
                    # previous iteration's wspd/rhoair (NameError on the
                    # first iteration).  Confirm all models in
                    # fgwrf_turbine_models have one of these heights.
                    if para['height'] == 65:
                        wspd = wspd_65
                        rhoair = rhoair_70
                    elif para['height'] == 80 or para['height'] == 78:
                        wspd = wspd_80
                        rhoair = rhoair_80
                    elif para['height'] == 85:
                        wspd = wspd_85
                        rhoair = rhoair_80

                    eqhour = get_eqhour(tb_model, wspd, rhoair)
                    res_data[tb_model] = {
                        'name': tb_model,
                        'zh_name': tb_model,
                        'values': eqhour,
                        'eqhour': eqhour,
                        'model': tb_model,
                        'rated_power': para['rated_power'],
                        'height': para['height'],
                        'IEC': para['IEC'],
                        'units': u'%s-%s(%s)' % (para['rated_power'], para['height'], para['IEC']),
                    }

                res = {
                    "type": "dataunit",
                    "uri": uri,
                    "info": {
                        "varname": 'eqhour',
                        "zh_varname": fgwrf_zh_varname_d['eqhour'],
                        "type": 'eqhour_summary',
                        "sample": sample,
                        "sub_varnames": fgwrf_turbine_models,
                    },
                    "coords": {
                        "dataset": uri_info['dataset'],
                        "subset": uri_info['subset'],
                        "varname": unit_uri_info['varname'],
                        "time": uri_info['year'],
                        "level": uri_info['level'],
                        "grid": uri_info['grid']
                    },
                    "data": res_data,
                }
                return res

            # Handle everything other than dist / eqhour
            if sample:
                # subset_uri = uri_info['subset'] + '_sample'
                subset_uri = uri_info['subset']
            else:
                subset_uri = uri_info['subset']

            vn = uri_info['varname']
            if self.py_stat_format == "json":
                if vn == 'windrose' and datatype == 'rawrose':
                    # Raw (un-binned) rose: recompute from PT wspd/wdir
                    # timeseries instead of the stored pack.
                    pt_subset = self._parent.uri_subsets['PT']
                    uri_tmpl = unit_uri_info['dataset'] + '/PT/%s/' + \
                               unit_uri_info['time'] + \
                               '/' + unit_uri_info['level'] + '/' + unit_uri_info['grid']
                    wspd_uri = uri_tmpl % 'wspd'
                    wdir_uri = uri_tmpl % 'wdir'

                    modified_kwargs = kwargs.copy()
                    modified_kwargs.update({
                        'datatype': 'dataframe',
                        'request': request,
                    })
                    # uri_info belongs to THIS uri; the PT calls must
                    # re-parse their own uris.
                    modified_kwargs.pop('uri_info', None)

                    wspd_df = pt_subset.get_data(wspd_uri, **modified_kwargs)
                    wdir_df = pt_subset.get_data(wdir_uri, **modified_kwargs)

                    rose = wind_stater.get_wind_rose(wspd_df[wspd_df.columns[0]].values,
                                                     wdir_df[wdir_df.columns[0]].values, return_type='permill')

                    jy, ix = [int(tk) for tk in uri_info['jy_ix'].split('_')]
                    # NOTE(review): positional call is (ix, jy, domain)
                    # here but (jy, ix, domain=...) in the 'nc' branch
                    # below -- one of the two argument orders is likely
                    # wrong; confirm Domainer.ij_to_lonlat's signature.
                    lon, lat = self.domainer.ij_to_lonlat(ix, jy, uri_info['domain'])

                    res = {
                        "type": "dataunit",
                        "uri": uri,
                        "info": {
                            "grid_lon": lon,
                            "grid_lat": lat,
                            "dataset": uri_info['dataset'],
                            "grid": uri_info['grid'],
                            "time": uri_info['time'],
                            "level": uri_info['level'],
                            "sample": sample,
                        },
                        "data": {
                            "wsbins": ["%d" % (i + 1) for i in range(rose.shape[0])],
                            "wdbinnum": 16,
                            "values": rose,
                            "units": fgwrf_py_units_d.get(vn, ''),
                        }
                    }
                else:
                    # Stored JSON pack: one file per grid point containing
                    # all variables/levels; pick the unit matching `uri`.
                    common = {}
                    s3uri = '%s/%s/%s/%s/%s/%s/%s.json' % (
                        self.dataset_storage_name,
                        subset_uri,
                        'ALL',
                        uri_info['time'],
                        uri_info['domain'],
                        'ALL',
                        uri_info['jy_ix'])
                    fc = raw_tiny_fc_manager.get(s3uri, when_not_exist=['fetch'])
                    fname = fc.filepath
                    raw_pack = json.load(open(fname), strict=False)
                    common = raw_pack['common']
                    # NOTE(review): pop() without default raises KeyError
                    # when 'substitute_coords' is absent; the outer broad
                    # except turns that into GetDataError.
                    common['info'].pop('substitute_coords')

                    thetag = None
                    for tag, unituri in raw_pack['tags'].iteritems():
                        # Old 7-segment uris are migrated on the fly.
                        if len(unituri.split('/')) == 7:
                            unituri = fgwrf_uri_old2new(unituri)
                        if unituri == uri:
                            thetag = tag
                            break
                    if thetag:
                        res = raw_pack['contents'][thetag]
                        if vn == 'wpdrose':
                            wpdrose = np.array(res['data']['values'])
                            wpdrose /= 1000.0  # kWh/m^2
                            res['data']['values'] = wpdrose
                            res['data']['units'] = fgwrf_py_units_d.get('wpdrose', u'kWh/m²')
                        elif vn == 'summary':
                            res['info']['zh_varname'] = fgwrf_zh_varname_d.get(vn, vn)

                        # Merge shared pack-level fields into the unit;
                        # unit-level values win over common ones.
                        if common:
                            for k, v in common.iteritems():
                                if k not in res:
                                    res[k] = v
                                else:
                                    v.update(res[k])
                                    res[k] = v
                        amend_dataunit(res, info={
                            "info": {
                                "sample": sample
                            }
                        })
                    # NOTE(review): if no tag matches, `res` stays unbound
                    # and the final `return res` raises (wrapped as
                    # GetDataError by the outer handler).
            elif self.py_stat_format == "nc":
                jy, ix = [int(tk) for tk in uri_info['jy_ix'].split('_')]
                grid_lon, grid_lat = self.domainer.ij_to_lonlat(jy, ix, domain=uri_info['domain'])

                # Stat files for summaries are small; roses etc. are large.
                fc_manager = raw_large_fc_manager
                if vn in ('summary', 'sfc_summary'):
                    varname_uri = 'stat'
                    fc_manager = raw_small_fc_manager
                elif vn == 'windrose':
                    varname_uri = 'rose'
                else:
                    varname_uri = vn

                s3uri = '%s/%s/%s/%s/%s/%s/ALL.nc' % (
                    self.dataset_storage_name,
                    subset_uri,
                    varname_uri,
                    uri_info['time'],
                    uri_info['domain'],
                    uri_info['level'],
                )
                fc = fc_manager.get(s3uri, when_not_exist=['fetch'])
                fname = fc.filepath
                with Dataset(fname) as ds:
                    info = {
                        "dataset": uri_info['dataset'],
                        "zh_dataset": self.zh_name,
                        "subset": uri_info['subset'],
                        "zh_subset": fgwrf_zh_subset_d[uri_info['subset']],
                        "varname": vn,
                        "zh_varname": fgwrf_zh_varname_d.get(vn, vn),
                        "grid_lon": grid_lon,
                        "grid_lat": grid_lat,
                        "level": uri_info['level'],
                        "grid": uri_info['grid'],
                        "time": uri_info['time'],
                        "sample": sample,
                    }
                    coords = {
                        "dataset": uri_info['dataset'],
                        "subset": "PY",
                        "varname": vn,
                        "time": uri_info['time'],
                        "level": uri_info['level'],
                        "grid": uri_info["grid"],
                    }
                    if vn in ('summary', 'sfc_summary'):
                        if vn == 'summary':
                            sub_varnames = self.py_summary_sub_varnames
                            res_type = 'wrf_summary'
                        elif vn == 'sfc_summary':
                            sub_varnames = self.py_sfc_summary_sub_varnames
                            res_type = 'wrf_sfc_summary'

                        info['sub_varnames'] = sub_varnames

                        data = {}
                        count = ds.variables['count'][0]
                        for subvn in sub_varnames:
                            if subvn in ds.variables:
                                subv = ds.variables[subvn]
                                stat_method = subv.stat_method
                                values = subv[0, jy, ix]
                                # Radiation totals are stored in Wh/m^2.
                                if subvn in ('swdown', 'glw', 'dni', 'ghi', 'ddif', 'ddir'):
                                    values /= 1000.0  # kWh/m^2
                            else:
                                values = None
                            stdvn = subvn + '_std'
                            if stdvn in ds.variables:
                                # NOTE(review): reads variables[subvn], not
                                # variables[stdvn] -- this looks like a bug
                                # (std equals the mean values); confirm.
                                std = ds.variables[subvn][0, jy, ix]
                            else:
                                std = None

                            units = fgwrf_py_units_d.get(subvn, '')
                            zh_name = fgwrf_zh_varname_d.get(subvn, subvn)

                            data[subvn] = {
                                "name": subvn,
                                "zh_name": zh_name,
                                "values": values,
                                "count": count,
                                "std": std,
                                "units": units,
                            }
                        res = {
                            "type": "dataunit",
                            "info": info,
                            "data": data,
                            "coords": coords,
                        }
                        res['info']['type'] = res_type
                    elif vn in ('windrose', 'wpdrose'):
                        rose = ds.variables[varname_uri][0, jy, ix]
                        # Collapse the stored speed bins into the 7 display
                        # bins (<3, 3-5, ..., >13 m/s) over 16 directions.
                        new_rose = np.zeros((7, 16), dtype='f4')
                        for i, (imin, imax) in enumerate(
                                [(0, 3), (3, 5), (5, 7), (7, 9), (9, 11), (11, 13), (13, None)]):
                            new_rose[i] = rose[imin:imax].sum(axis=0)
                        if vn == 'wpdrose':
                            new_rose /= 1000.0  # kWh/m^2

                        if vn == 'windrose' and datatype == 'rawrose':
                            res = {
                                "type": "dataunit",
                                "info": info,
                                "data": {
                                    "wsbins": ["%d" % (i + 1) for i in range(rose.shape[0])],
                                    "wdbinnum": 16,
                                    "values": rose,
                                    "units": fgwrf_py_units_d.get(vn, ''),
                                },
                                "coords": coords,
                            }
                        else:
                            res = {
                                "type": "dataunit",
                                "info": info,
                                "data": {
                                    "wsbins": ["<3", "3-5", "5-7", "7-9", "9-11", "11-13", ">13"],
                                    "wdbinnum": 16,
                                    "values": new_rose,
                                    "units": fgwrf_py_units_d.get(vn, ''),
                                },
                                "coords": coords,
                            }
                        res['info']['type'] = 'rose'
            else:
                raise GetDataError('Cannot get data: %s with kwargs %s' % (uri, kwargs))
        except GetDataError as e:
            raise e
        except Exception as e:
            raise GetDataError(unicode(e))
        return res

    @support_wws
    @support_packunitjson
    def get_file(self, uri, dest, request=None, *args, **kwargs):
        """File export; actual work is done by the decorators
        (wws / packed-unit JSON) around this no-op body."""
        pass


class FG_WRF_PM_Dataset(FG_WRF_DatasetBase):
    """Point Monthly statistics (PM) subset of a WRF dataset.

    Per-grid-point monthly statistics are served either from packed JSON
    units or from NetCDF stat files, selected by ``self.pm_stat_format``
    (set elsewhere — presumably on the base class; verify).
    """

    # URI fields that may be requested with multiple (zipped) values at once.
    zipped_fields = {'varname', 'level'}

    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_WRF_PM_Dataset, self).__init__(name, uri, info, *args, **kwargs)
        # This subset serves the monthly-stat variable list from the base class.
        self.varnames = self.pm_varnames
        self.uri_field_values = {
            'varname': self.varnames,
            'level': self.levels
        }

    @property
    def schema(self):
        """Build the UI schema for the PM subset.

        Returns one 'variable' entry per monthly-stat varname, each with
        time / level / version coordinate descriptors.  Zipped (multi-value)
        requests are only offered for the packed-JSON storage format.
        """
        res = {
            'arch': 'subset',
            'name': 'PM',
            'type': '',
            'subs': [],
        }
        # Only the packed-JSON storage format supports zipped requests.
        if self.pm_stat_format == "json":
            var_zipped = True
            coord_zipped = True
        else:
            var_zipped = False
            coord_zipped = False
        for vn in self.pm_varnames:
            levels = self.levels
            level_default = ['70']
            if self.version >= 2:
                # v2+ datasets expose extra pseudo-levels for combined variables.
                if vn in fgwrf_combine_varname_d:
                    levels = self.levels + fgwrf_combine_varname_d[vn].keys()
            if vn in self.sfc_vars:
                # Surface variables exist only at the 'sfc' level.
                levels = ['sfc']
                level_default = ['sfc']
            if level_default[0] not in levels:
                level_default = [levels[0]]

            res['subs'].append({
                'arch': 'variable',
                'name': vn,
                'vartype': 'VLT',
                'datatype': 'monthlystat',
                'formats': ['json', 'hc_figure'],
                'zipped': var_zipped,
                "coords": [{
                    "name": "time",
                    "values": self.years,
                    "default": [self.years[-1]],
                    "can_poly": True,
                }, {
                    "name": "level",
                    "values": levels,
                    "zipped": coord_zipped,
                    "default": level_default,
                    "can_poly": True,
                }, {
                    # Placeholders filled in by a template layer — verify where
                    # '<%= ... %>' substitution happens.
                    "name": "version",
                    "values": '<%= versions %>',
                    "replace_name": "grid",
                    "replace_dict": '<%= version_replace_dict %>',
                    "default": "<%= version_default %>",
                }]
            })
        return res

    @cache_data
    @pack_data
    @perm_data
    def get_data(self, uri, request=None, *args, **kwargs):
        """Return a monthly-stat dataunit for *uri*.

        Depending on ``self.pm_stat_format`` the data comes from a packed
        JSON file ('json') or a NetCDF stat file ('nc'); anything else
        raises ``GetDataError``.  Any unexpected exception is re-raised as
        ``GetDataError`` as well.  NOTE(review): if no packed unit matches
        the URI in the 'json' branch, ``res`` is never bound and the final
        ``return`` raises — that NameError surfaces as a GetDataError.
        """
        datatype = kwargs.get('datatype', 'dataunit')
        sample = kwargs.get('sample', False)
        uri_info = kwargs.get('uri_info', None)
        if uri_info is None:
            uri_info = parse_fgwrf_uri(uri)

        try:
            # Sample requests currently resolve to the same subset path.
            if sample:
                # subset_uri = uri_info['subset'] + '_sample'
                subset_uri = uri_info['subset']
            else:
                subset_uri = uri_info['subset']

            if self.pm_stat_format == "json":
                common = {}
                s3uri = '%s/%s/%s/%s/%s/%s/%s.json' % (
                    self.dataset_storage_name,
                    subset_uri,
                    'ALL',
                    uri_info['time'],
                    uri_info['domain'],
                    'ALL',
                    uri_info['jy_ix'])
                fc = raw_tiny_fc_manager.get(s3uri, when_not_exist=['fetch'])
                fname = fc.filepath
                raw_pack = json.load(open(fname), strict=False)
                common = raw_pack['common']
                # 'substitute_coords' is packing-time metadata; drop it before
                # merging (a missing key would surface as a GetDataError).
                common['info'].pop('substitute_coords')

                # Find the packed unit whose URI matches the request; legacy
                # 7-segment URIs are converted to the new form first.
                thetag = None
                for tag, unituri in raw_pack['tags'].iteritems():
                    if len(unituri.split('/')) == 7:
                        unituri = fgwrf_uri_old2new(unituri)
                    if unituri == uri:
                        thetag = tag
                        break
                if thetag:
                    res = raw_pack['contents'][thetag]
                    # Merge shared 'common' entries into the unit; the
                    # unit-level values take precedence on key collisions.
                    if common:
                        for k, v in common.iteritems():
                            if k not in res:
                                res[k] = v
                            else:
                                v.update(res[k])
                                res[k] = v
                    amend_dataunit(res, info={
                        "info": {
                            "sample": sample
                        }
                    })
            elif self.pm_stat_format == 'nc':
                vn = uri_info['varname']
                # Combined variables are stored under a level-specific name.
                combine_vars = fgwrf_combine_varname_d.get(vn, {})
                true_vn = combine_vars.get(uri_info['level'], vn)
                jy, ix = [int(tk) for tk in uri_info['jy_ix'].split('_')]
                s3uri = '%s/%s/%s/%s/%s/%s/ALL.nc' % (
                    self.dataset_storage_name,
                    subset_uri,
                    'stat',
                    uri_info['time'],
                    uri_info['domain'],
                    uri_info['level'],
                )
                fc = raw_medium_fc_manager.get(s3uri, when_not_exist=['fetch'])
                fname = fc.filepath
                with Dataset(fname) as ds:
                    v = ds.variables[true_vn]
                    values = v[:, jy, ix]
                    stat_method = v.stat_method
                    std_vn = true_vn + '_std'
                    grid_lon, grid_lat = self.domainer.ij_to_lonlat(jy, ix, domain=uri_info['domain'])
                    # Standard deviation is optional in the stat files.
                    if std_vn in ds.variables:
                        std = ds.variables[std_vn][:, jy, ix]
                    else:
                        std = None
                    count = ds.variables['count'][:]
                # Radiation totals are converted to kWh/m^2.
                if vn in ('swdown', 'glw', 'ghi', 'dni', 'ddif', 'ddir'):
                    values /= 1000.0  # kWh/m^2

                res = {
                    "type": "dataunit",
                    "info": {
                        "dataset": uri_info['dataset'],
                        "zh_dataset": self.zh_name,
                        "subset": uri_info['subset'],
                        "zh_subset": fgwrf_zh_subset_d[uri_info['subset']],
                        "varname": vn,
                        "zh_varname": fgwrf_zh_varname_d.get(vn, vn),
                        "type": "monthlystat",
                        "grid_lon": grid_lon,
                        "grid_lat": grid_lat,
                        "sample": sample,
                    },
                    "data": {
                        "values": values,
                        "std": std,
                        "count": count,
                        "units": fgwrf_pm_units_d.get(vn, ''),
                        "stat_method": stat_method,
                        "category_name": "month",
                        "zh_category_name": u"月份",
                        "categories": [
                            "01",
                            "02",
                            "03",
                            "04",
                            "05",
                            "06",
                            "07",
                            "08",
                            "09",
                            "10",
                            "11",
                            "12"
                        ],
                    },
                    "coords": {
                        "dataset": uri_info['dataset'],
                        "subset": "PM",
                        "varname": vn,
                        "time": uri_info['time'],
                        "level": uri_info['level'],
                        "grid": uri_info["grid"],
                    }
                }
            else:
                raise GetDataError('Cannot get data: %s with kwargs %s' % (uri, kwargs))
        except GetDataError as e:
            raise e
        except Exception as e:
            raise GetDataError(unicode(e))

        return res

    @support_packunitjson
    def get_file(self, uri, dest, request=None, *args, **kwargs):
        """File-download entry point; presumably serviced entirely by the
        ``support_packunitjson`` decorator — TODO confirm."""
        pass


class FG_WRF_PD_Dataset(FG_WRF_DatasetBase):
    """Point Diurnal statistics (PD) subset of a WRF dataset.

    Per-grid-point hour-of-day statistics are served either from packed
    JSON units or from NetCDF stat files, selected by
    ``self.pd_stat_format`` (set elsewhere — presumably on the base
    class; verify).
    """

    # URI fields that may be requested with multiple (zipped) values at once.
    zipped_fields = {'varname', 'level'}

    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_WRF_PD_Dataset, self).__init__(name, uri, info, *args, **kwargs)
        # This subset serves the diurnal-stat variable list from the base class.
        self.varnames = self.pd_varnames
        self.uri_field_values = {
            'varname': self.varnames,
            'level': self.levels
        }

    @property
    def schema(self):
        """Build the UI schema for the PD subset.

        Returns one 'variable' entry per diurnal-stat varname, each with
        time / level / version coordinate descriptors.  Zipped
        (multi-value) requests are only offered for the packed-JSON
        storage format.
        """
        res = {
            'arch': 'subset',
            'name': 'PD',
            'type': '',
            'subs': [],
        }
        # Only the packed-JSON storage format supports zipped requests.
        if self.pd_stat_format == "json":
            var_zipped = True
            coord_zipped = True
        else:
            var_zipped = False
            coord_zipped = False
        for vn in self.pd_varnames:
            levels = self.levels
            level_default = ['70']
            if self.version >= 2:
                # v2+ datasets expose extra pseudo-levels for combined variables.
                if vn in fgwrf_combine_varname_d:
                    levels = self.levels + fgwrf_combine_varname_d[vn].keys()
            if vn in self.sfc_vars:
                # Surface variables exist only at the 'sfc' level.
                levels = ['sfc']
                level_default = ['sfc']
            if level_default[0] not in levels:
                level_default = [levels[0]]

            res['subs'].append({
                'arch': 'variable',
                'name': vn,
                'vartype': 'VLT',
                'datatype': 'diurnalstat',
                'formats': ['json', 'hc_figure'],
                'zipped': var_zipped,
                "coords": [{
                    "name": "time",
                    "values": self.years,
                    "default": [self.years[-1]],
                    "can_poly": True,
                }, {
                    "name": "level",
                    "values": levels,
                    "zipped": coord_zipped,
                    "default": level_default,
                    "can_poly": True,
                }, {
                    # Placeholders filled in by a template layer — verify where
                    # '<%= ... %>' substitution happens.
                    "name": "version",
                    "values": '<%= versions %>',
                    "replace_name": "grid",
                    "replace_dict": '<%= version_replace_dict %>',
                    "default": "<%= version_default %>",
                }]
            })
        return res

    @cache_data
    @pack_data
    @perm_data
    def get_data(self, uri, request=None, *args, **kwargs):
        """Return a diurnal-stat dataunit for *uri*.

        Depending on ``self.pd_stat_format`` the data comes from a packed
        JSON file ('json') or a NetCDF stat file ('nc'); anything else
        raises ``GetDataError``.  Any unexpected exception is re-raised
        as ``GetDataError`` as well.
        """
        datatype = kwargs.get('datatype', 'dataunit')
        sample = kwargs.get('sample', False)
        uri_info = kwargs.get('uri_info', None)
        if uri_info is None:
            uri_info = parse_fgwrf_uri(uri)

        # Time-zone support is not fully used yet: a non-zero timezone in the
        # URI wins; otherwise fall back to the utc_offset kwarg (default 8).
        if uri_info['timezone'] != 0:
            utc_offset = uri_info['timezone']
        else:
            utc_offset = int(kwargs.get('utc_offset', 8))
        uri_tail = '?utc_offset=%s' % utc_offset

        try:
            # Sample requests currently resolve to the same subset path.
            if sample:
                # subset_uri = uri_info['subset'] + '_sample'
                subset_uri = uri_info['subset']
            else:
                subset_uri = uri_info['subset']

            if self.pd_stat_format == "json":
                common = {}
                s3uri = '%s/%s/%s/%s/%s/%s/%s.json' % (
                    self.dataset_storage_name,
                    subset_uri,
                    'ALL',
                    uri_info['time'],
                    uri_info['domain'],
                    'ALL',
                    uri_info['jy_ix'])
                fc = raw_tiny_fc_manager.get(s3uri, when_not_exist=['fetch'])
                fname = fc.filepath
                # Close the file promptly instead of leaking the handle to GC.
                with open(fname) as f:
                    raw_pack = json.load(f, strict=False)
                common = raw_pack['common']
                # 'substitute_coords' is packing-time metadata; drop it before
                # merging (a missing key would surface as a GetDataError).
                common['info'].pop('substitute_coords')

                # Find the packed unit whose URI matches the request; legacy
                # 7-segment URIs are converted to the new form first.
                thetag = None
                for tag, unituri in raw_pack['tags'].iteritems():
                    if len(unituri.split('/')) == 7:
                        unituri = fgwrf_uri_old2new(unituri)
                    if unituri == uri:
                        thetag = tag
                        break
                if thetag:
                    res = raw_pack['contents'][thetag]
                    # Merge shared 'common' entries into the unit; the
                    # unit-level values take precedence on key collisions.
                    if common:
                        for k, v in common.iteritems():
                            if k not in res:
                                res[k] = v
                            else:
                                v.update(res[k])
                                res[k] = v
                    amend_dataunit(res, info={
                        "info": {
                            "sample": sample
                        },
                        "data": {
                            "utc_offset": utc_offset
                        }
                    })
            elif self.pd_stat_format == 'nc':
                vn = uri_info['varname']
                # Combined variables are stored under a level-specific name.
                combine_vars = fgwrf_combine_varname_d.get(vn, {})
                true_vn = combine_vars.get(uri_info['level'], vn)
                jy, ix = [int(tk) for tk in uri_info['jy_ix'].split('_')]
                s3uri = '%s/%s/%s/%s/%s/%s/ALL.nc' % (
                    self.dataset_storage_name,
                    subset_uri,
                    'stat',
                    uri_info['time'],
                    uri_info['domain'],
                    uri_info['level'],
                )
                fc = raw_medium_fc_manager.get(s3uri, when_not_exist=['fetch'])
                fname = fc.filepath
                with Dataset(fname) as ds:
                    v = ds.variables[true_vn]
                    values = v[:, jy, ix]
                    stat_method = v.stat_method
                    std_vn = true_vn + '_std'
                    grid_lon, grid_lat = self.domainer.ij_to_lonlat(jy, ix, domain=uri_info['domain'])
                    # Standard deviation is optional in the stat files.
                    if std_vn in ds.variables:
                        std = ds.variables[std_vn][:, jy, ix]
                    else:
                        std = None
                    count = ds.variables['count'][:]

                res = {
                    "type": "dataunit",
                    "info": {
                        "dataset": uri_info['dataset'],
                        "zh_dataset": self.zh_name,
                        "subset": uri_info['subset'],
                        "zh_subset": fgwrf_zh_subset_d[uri_info['subset']],
                        "varname": vn,
                        "zh_varname": fgwrf_zh_varname_d.get(vn, vn),
                        "type": "diurnalstat",
                        "grid_lon": grid_lon,
                        "grid_lat": grid_lat,
                        "sample": sample,
                    },
                    "data": {
                        "values": values,
                        "std": std,
                        "count": count,
                        "units": fgwrf_pd_units_d.get(vn, ''),
                        "stat_method": stat_method,
                        "category_name": "hour",
                        "zh_category_name": u"小时",
                        "categories": ["%02d" % h for h in range(24)],
                    },
                    "coords": {
                        "dataset": uri_info['dataset'],
                        "subset": "PD",
                        "varname": vn,
                        "time": uri_info['time'],
                        # Fix: 'level' was missing here, unlike the PM and RY
                        # subsets, so the returned coords could not identify
                        # which level the statistics belong to.
                        "level": uri_info['level'],
                        "grid": uri_info["grid"],
                    }
                }
            else:
                raise GetDataError('Cannot get data: %s with kwargs %s' % (uri, kwargs))
        except GetDataError as e:
            raise e
        except Exception as e:
            raise GetDataError(unicode(e))

        return res

    @support_packunitjson
    def get_file(self, uri, dest, request=None, *args, **kwargs):
        """File-download entry point; presumably serviced entirely by the
        ``support_packunitjson`` decorator — TODO confirm."""
        pass


class FG_WRF_RY_Dataset(FG_WRF_DatasetBase):
    """Rect Yearly statistics (RY) subset of a WRF dataset.

    Serves 2-D (rectangular) yearly-statistic fields, either from a single
    model domain or mosaicked from several nested domains when the request
    addresses a virtual level grid ('L1', 'L2', ...).
    """

    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_WRF_RY_Dataset, self).__init__(name, uri, info, *args, **kwargs)
        # This subset serves the yearly-stat variable list from the base class.
        self.varnames = self.ry_varnames

    @property
    def schema(self):
        """Build the static UI schema for the RY subset (one entry per
        variable).  See ``get_schema`` for the per-request variant."""
        res = {
            'arch': 'subset',
            'name': 'RY',
            'type': '',
            'subs': [],
        }
        for vn in self.ry_varnames:
            levels = self.levels
            level_default = ['70']
            if self.version >= 2:
                # v2+ datasets expose extra pseudo-levels for combined variables.
                if vn in fgwrf_combine_varname_d:
                    levels = self.levels + fgwrf_combine_varname_d[vn].keys()
            if vn in self.sfc_vars:
                # Surface variables exist only at the 'sfc' level.
                levels = ['sfc']
                level_default = ['sfc']
            if level_default[0] not in levels:
                level_default = [levels[0]]

            res['subs'].append({
                'arch': 'variable',
                'name': vn,
                'vartype': 'VLT',
                'datatype': 'yearlystat',
                'formats': ['rect_figure', 'kmz'],
                'zipped': False,
                "coords": [{
                    "name": "time",
                    "values": self.years,
                    "default": [self.years[-1]]
                }, {
                    "name": "level",
                    "values": levels,
                    "default": level_default,
                    "zipped": False,
                }, {
                    # Placeholders filled in by a template layer — verify where
                    # '<%= ... %>' substitution happens.
                    "name": "version",
                    "values": '<%= versions %>',
                    "replace_name": "grid",
                    "replace_dict": '<%= version_replace_dict %>',
                    "default": "<%= version_default %>"
                }]
            })
        return res

    def get_schema(self, request=None, *args, **kwargs):
        """Per-request variant of ``schema``.

        Largely duplicates the ``schema`` property; the commented-out token
        check suggests it exists to gate extra download formats per user.
        """
        res = {
            'arch': 'subset',
            'name': 'RY',
            'type': '',
            'subs': [],
        }
        for vn in self.ry_varnames:
            levels = self.levels
            level_default = ['70']
            if self.version >= 2:
                # v2+ datasets expose extra pseudo-levels for combined variables.
                if vn in fgwrf_combine_varname_d:
                    levels = self.levels + fgwrf_combine_varname_d[vn].keys()
            if vn in self.sfc_vars:
                # Surface variables exist only at the 'sfc' level.
                levels = ['sfc']
                level_default = ['sfc']
            if level_default[0] not in levels:
                level_default = [levels[0]]

            formats = ['rect_figure', 'kmz']
            # if user_has_token(request.user, 'download_ry_data:{}'.format(self.topset.name)):
            #    formats = ['csv', 'nc', 'json'] + formats

            res['subs'].append({
                'arch': 'variable',
                'name': vn,
                'vartype': 'VLT',
                'datatype': 'yearlystat',
                'formats': formats,
                'zipped': False,
                "coords": [{
                    "name": "time",
                    "values": self.years,
                    # "default": [self.default_year]
                }, {
                    "name": "level",
                    "values": levels,
                    "default": level_default,
                    "zipped": False,
                }, {
                    "name": "version",
                    "values": '<%= versions %>',
                    "replace_name": "grid",
                    "replace_dict": '<%= version_replace_dict %>',
                    "default": "<%= version_default %>"
                }]
            })
        return res

    @cache_data(valid_params={'dataunit': ['datatype', 'user']})
    @provide_figinfo(suggest_cmap_dict=fgwrf_suggest_cmap_d)
    @perm_data
    def get_data(self, uri, request=None, *args, **kwargs):
        """Return a 2-D yearly-stat dataunit (values + lon/lat) for *uri*.

        For a virtual level grid ('L<n>' domain) the field is mosaicked
        from the overlapping model domains with edge-tapered blending
        weights; otherwise a single domain's file is sliced directly.
        Unexpected exceptions are re-raised as ``GetDataError``.
        """
        datatype = kwargs.get('datatype', 'dataunit')
        sample = kwargs.get('sample', False)
        uri_info = kwargs.get('uri_info', None)
        if uri_info is None:
            uri_info = parse_fgwrf_uri(uri)

        try:
            varname = uri_info['varname']

            # v2+ stores yearly stats under the PY subset in 'stat' files;
            # legacy datasets keep them under RY as 'mean' files.
            if self.version >= 2:
                true_subset = 'PY'
                vn = 'stat'
            else:
                true_subset = 'RY'
                vn = 'mean'

            # Sample requests currently resolve to the same subset path.
            if sample:
                # subset_uri = uri_info['subset'] + '_sample'
                subset_uri = true_subset
            else:
                subset_uri = true_subset

            # TODO: handle * -> ALL

            # TODO handle domain == L1, L2, etc.
            domain = uri_info['domain']
            if domain.startswith('L'):
                # Virtual level grid: 'jy_ix' carries 'jy1:jy2_ix1:ix2' slices
                # in level-grid coordinates.
                ljy, lix = uri_info['jy_ix'].split('_')
                ljy1, ljy2 = ljy.split(':')
                lix1, lix2 = lix.split(':')
                ljy1 = int(ljy1);
                ljy2 = int(ljy2)
                lix1 = int(lix1);
                lix2 = int(lix2)
                domain_level = int(domain[1:])
                # Map the requested level-grid region onto the contributing
                # model-domain regions (trimmed, in blending order).
                subinfos = self.domainer.level_region_to_domain_regions(domain_level,
                                                                        ix1=lix1,
                                                                        ix2=lix2,
                                                                        jy1=ljy1,
                                                                        jy2=ljy2,
                                                                        reverse_order=True,
                                                                        trim=True
                                                                        )

                # One layer per contributing domain plus matching blend weights.
                stack_data = np.zeros((len(subinfos), ljy2 - ljy1, lix2 - lix1), 'f4')
                weight = np.zeros((len(subinfos), ljy2 - ljy1, lix2 - lix1), 'f4')
                # stack_data[:] = np.nan

                LIX, LJY = np.meshgrid(np.arange(lix1, lix2), np.arange(ljy1, ljy2))
                lon, lat = self.domainer.level_ij_to_lonlat(LIX, LJY, domain_level)

                for i, sub in enumerate(subinfos):
                    s3uri = '%s/%s/%s/%s/%s/%s/ALL.nc' % (
                        self.dataset_storage_name,
                        subset_uri,
                        vn,  # uri_info['varname'],
                        uri_info['time'],
                        sub['domain'],
                        uri_info['level'],
                    )
                    fc = raw_small_fc_manager.get(s3uri, when_not_exist=['fetch'])
                    fname = fc.filepath

                    jy_beg = sub['jy1'];
                    jy_end = sub['jy2'];
                    ix_beg = sub['ix1'];
                    ix_end = sub['ix2'];

                    with Dataset(fname, 'r') as ncf:
                        # Temporary fix for the td/sfc issue: combined
                        # variables are stored under a level-specific name.
                        varname = uri_info['varname']
                        combine_vars = fgwrf_combine_varname_d.get(varname, {})
                        using_varname = combine_vars.get(uri_info['level'], varname)

                        v = ncf.variables[using_varname]
                        subdata = v[0, jy_beg:jy_end, ix_beg:ix_end]

                    # Place this domain's slab into its position on the
                    # requested level-grid window.
                    stack_data[i, jy_beg + sub['jy_offset'] - ljy1: jy_end + sub['jy_offset'] - ljy1,
                    ix_beg + sub['ix_offset'] - lix1: ix_end + sub['ix_offset'] - lix1] = subdata

                    # Edge-tapered weights: 9 shrinking rings ramp from 0.01
                    # (0.1**2) at the slab border up to 1.0 in the interior,
                    # so overlapping domains blend smoothly.
                    w_arr = np.linspace(0.1, 1.0, 9) ** 2
                    for shrink in range(len(w_arr)):
                        weight[i,
                        jy_beg + sub['jy_offset'] - ljy1 + shrink: max(0, jy_end + sub['jy_offset'] - ljy1 - shrink),
                        ix_beg + sub['ix_offset'] - lix1 + shrink: max(0, ix_end + sub['ix_offset'] - lix1 - shrink)] = \
                            w_arr[shrink]

                # Clamp each later layer's weight in place so the cumulative
                # weight never exceeds 1 (earlier layers take precedence).
                for i in range(1, len(subinfos)):
                    rest_max_weight = 1.0 - weight[0:i].sum(axis=0)
                    now_layer = weight[i]
                    w_exceeded = np.where(now_layer > rest_max_weight)
                    now_layer[w_exceeded] = rest_max_weight[w_exceeded]
                    del now_layer
                    del rest_max_weight
                    del w_exceeded

                # Normalized weighted average; cells with no coverage become NaN.
                data = np.ma.masked_invalid((stack_data * weight).sum(axis=0) / weight.sum(axis=0)).filled(np.nan)
                del weight
                del stack_data
            else:
                # Single-domain request: slice the stat file directly.
                s3uri = '%s/%s/%s/%s/%s/%s/ALL.nc' % (
                    self.dataset_storage_name,
                    subset_uri,
                    vn,  # uri_info['varname'],
                    uri_info['time'],
                    uri_info['domain'],
                    uri_info['level'],
                )
                fc = raw_medium_fc_manager.get(s3uri, when_not_exist=['fetch'])
                fname = fc.filepath

                jy_ix = uri_info['jy_ix']
                jy, ix = jy_ix.split('_')
                jy_beg, jy_end = jy.split(':')
                ix_beg, ix_end = ix.split(':')
                jy_beg = int(jy_beg);
                jy_end = int(jy_end)
                ix_beg = int(ix_beg);
                ix_end = int(ix_end)

                with Dataset(fname, 'r') as ncf:
                    # Temporary fix for the td/sfc issue: the variable is
                    # stored as 'ts' in the file.
                    varname = uri_info['varname']
                    if varname == 'td' and uri_info['level'] == 'sfc':
                        using_varname = 'ts'
                    else:
                        using_varname = varname

                    v = ncf.variables[using_varname]
                    data = v[0, jy_beg:jy_end, ix_beg:ix_end]
                    lat = ncf.variables['lat'][jy_beg:jy_end, ix_beg:ix_end]
                    lon = ncf.variables['lon'][jy_beg:jy_end, ix_beg:ix_end]

            if varname in ("swdown", "glw", "dni", "ghi", "ddir", "ddif"):
                data /= 1000.0  # convert to kWh m^-2

            res = {
                "type": "dataunit",
                "uri": uri,
                "info": {
                    "varname": varname,
                    "type": 'rect_yearlystat',
                    "sample": sample
                },
                "coords": {
                    "dataset": uri_info['dataset'],
                    "subset": uri_info['subset'],
                    "varname": uri_info['varname'],
                    "time": uri_info['year'],
                    "level": uri_info['level'],
                    "grid": uri_info['grid']
                },
                "data": {
                    "lat": lat,
                    "lon": lon,
                    "values": data,
                    "suggest_range": fgwrf_py_suggest_range_d.get(varname, (None, None)),
                    "suggest_cmap": fgwrf_suggest_cmap_d.get(varname, 'jet'),
                    "units": fgwrf_py_units_d.get(varname, ''),
                }
            }
        except GetDataError as e:
            raise e
        except Exception as e:
            raise GetDataError(unicode(e))

        return res

    def get_file(self, uri, dest, request=None, *args, **kwargs):
        """Delegate file downloads to the shared RY helper (defined on the
        base class — verify)."""
        return self.RY_get_file(uri, dest, request=request, *args, **kwargs)


class FG_WRF_Dataset(FG_WRF_DatasetBase):
    """Top-level WRF dataset.

    Aggregates the statistic subsets (PY/PM/PD/PT point stats, RY rect
    stats), exposes a combined client schema, resolves lon/lat lookups to
    grid coordinates via ``self.domainer``, and dispatches ``get_data`` /
    ``get_file`` calls to the subset named in the URI.
    """
    mold_name = 'WRF'
    # Registry of subset handlers; 'uri' is the key used in parsed URIs.
    subset_classes = [
        {'name': 'PointYearlyStat', 'uri': 'PY', 'class': FG_WRF_PY_Dataset},
        {'name': 'PointMonthlyStat', 'uri': 'PM', 'class': FG_WRF_PM_Dataset},
        {'name': 'PointDiurnalStat', 'uri': 'PD', 'class': FG_WRF_PD_Dataset},
        {'name': 'PointTimeseries', 'uri': 'PT', 'class': FG_WRF_PT_Dataset},
        {'name': 'RectYearlyStat', 'uri': 'RY', 'class': FG_WRF_RY_Dataset},
    ]
    tags = ['wind', 'wrf', 'meteorology']
    # JSON skeleton shown when registering a new dataset of this mold.
    info_template = """{
    "WRF_version": "",
    "levels": ["10", "70", "80", "100"],
    "years": ["YYYY"],
    "begdt": "YYYY-mm-dd HH",
    "enddt": "YYYY-mm-dd HH",
    "domain_info": {
        "1": {
            "available": true,
            "priority": 9,
            "trim": 0
        },
        "2": {
            "available": true,
            "priority": 1,
            "trim": 0
        },
        "3": {
            "available": true,
            "priority": 2,
            "trim": 0
        }
    }
}"""

    def __init__(self, name, uri, info, *args, **kwargs):
        super(FG_WRF_Dataset, self).__init__(name, uri, info, *args, **kwargs)
        # Description falls back to the Chinese display name, then the raw name.
        self.desc = info.get('desc', info.get('zh_name', name))

    @classmethod
    def get_mold_desc(cls):
        """Short human-readable description of this mold."""
        return 'WRF dataset'

    @classmethod
    def import_dataset(cls, src, dest, meta=None, *args, **kwargs):
        """Import a dataset directory, then attach the content of its
        ``namelist.wps`` to the imported info dict as ``'wps_namelist'``.

        Returns whatever the base-class import returns; only dict results
        (successful imports) are augmented.
        """
        import_res = super(FG_WRF_Dataset, cls).import_dataset(src, dest, meta, *args, **kwargs)
        if isinstance(import_res, dict):
            namelist_fname = os.path.join(dest, 'namelist.wps')
            # Use a context manager so the handle is closed deterministically
            # (the previous bare open(...).read() leaked it).
            with open(namelist_fname) as namelist_file:
                namelist = namelist_file.read()
            # TODO: check
            print(namelist)
            print(import_res)
            import_res['info']['wps_namelist'] = namelist
        return import_res

    def _build_schema(self, time_default=None):
        """Build the schema dict shared by get_schema() and the ``schema``
        property; ``"subs"`` is left empty for the caller to fill.

        time_default: optional default selection for the 'time' coord; the
        key is omitted entirely when None (matching the request-time form).
        """
        time_coord = {
            "name": "time",
            "values": self.years,
            "can_poly": True,
        }
        if time_default is not None:
            time_coord["default"] = time_default
        return {
            "arch": "dataset",
            "mold": self.mold_name,
            "type": "DSV",
            "name": self.name,
            "zh_name": self.zh_name,
            "desc": self.desc,
            "common": {
                "coords": [time_coord, {
                    "name": "level",
                    "values": self.levels,
                    "zipped": True,
                    "default": ['70'],
                    "can_poly": True,
                }, {
                    "name": "version",
                    # Placeholders filled in later from lookup() results.
                    "values": '<%= versions %>',
                    "replace_name": "grid",
                    "replace_dict": '<%= version_replace_dict %>',
                    "default": "<%= version_default %>",
                }]
            },
            "varname_dict": fgwrf_zh_varname_d,
            "subset_dict": fgwrf_zh_subset_d,
            "subs": []
        }

    def get_schema(self, request=None, *args, **kwargs):
        """Per-request schema: subsets build their schemas with the request,
        and the 'time' coord carries no default."""
        res = self._build_schema()
        for class_info in self.subset_classes:
            sub_schema = self.uri_subsets[class_info["uri"]].get_schema(request, *args, **kwargs)
            res["subs"].append(sub_schema)
        return res

    @property
    def schema(self):
        """Static schema: 'time' defaults to the most recent year and each
        subset contributes its static ``schema`` property."""
        res = self._build_schema(time_default=[self.years[-1]])
        for class_info in self.subset_classes:
            res["subs"].append(self.uri_subsets[class_info["uri"]].schema)
        return res

    def lookup(self, info, request=None, *args, **kwargs):
        """Resolve a lon/lat point or rect to dataset grid coordinates.

        info: dict with 'type' of 'point' (keys lon/lat) or 'rect'
        (keys lon1/lat1/lon2/lat2).
        Returns a list of data-brief dicts naming the applicable subsets,
        resolutions and grid replacement strings; empty when nothing
        matches.  Raises LookupError (NOTE(review): this shadows the
        builtin — presumably a project exception imported via *) on any
        internal failure.
        """
        try:
            results = []
            if info['type'] == 'point':
                valid_points = self.domainer.lookup_lonlat(lon=float(info['lon']), lat=float(info['lat']),
                                                           int_index=True, trim=True, level_unique=True)
                if len(valid_points) == 0:
                    return results

                res = {
                    "dataset": self.name,
                    "subsets": ["PT", "PY", "PM", "PD"],
                    "versions": [],
                    "version_replace_dict": {},
                }
                for point_info in valid_points:
                    jy_ix = '%03d_%03d' % (point_info['jy'], point_info['ix'])
                    resolution = point_info['resolution']
                    domain = str(point_info['domain'])
                    res['versions'].append(resolution)
                    res['version_replace_dict'][resolution] = '%s_%s' % (domain, jy_ix)
                    # Overwritten each iteration: the last match wins as default.
                    res['version_default'] = [resolution]
                results.append(res)
            elif info['type'] == 'rect':
                # TODO:
                # merge_info = self.domainer....
                valid_rects = self.domainer.lookup_lonlat_rect(
                    lon1=float(info['lon1']),
                    lat1=float(info['lat1']),
                    lon2=float(info['lon2']),
                    lat2=float(info['lat2']),
                    by='domain_level',
                    array_format='list',
                    domain_border_trim=True,
                )
                if len(valid_rects) == 0:
                    return results

                res = {
                    "dataset": self.name,
                    "subsets": ['RY'],
                    "versions": [],
                    "version_replace_dict": {},
                }
                for merge_info in valid_rects:
                    # Rect grids encode slice ranges: jy1:jy2_ix1:ix2.
                    jy_ix = '%03d:%03d_%03d:%03d' % (
                        merge_info['jy1'], merge_info['jy2'],
                        merge_info['ix1'], merge_info['ix2'],
                    )
                    resolution = merge_info['resolution']
                    domain = str(merge_info['domain'])
                    res['versions'].append(resolution)
                    res['version_replace_dict'][resolution] = '%s_%s' % (domain, jy_ix)
                    # Overwritten each iteration: the last match wins as default.
                    res['version_default'] = [resolution]
                results.append(res)

            return results
        except Exception as e:
            raise LookupError(u'[%s] %s' % (type(e), unicode(e)))

    def get_data(self, uri, request=None, *args, **kwargs):
        """Parse the URI and route the request to the matching subset.

        Returns None when the URI targets another dataset or an unknown
        subset.  Re-raises GetDataError unchanged; wraps any other
        exception into GetDataError.
        """
        try:
            # Caller may pass a pre-parsed uri_info to avoid re-parsing.
            if 'uri_info' in kwargs:
                uri_info = kwargs['uri_info']
            else:
                uri_info = parse_fgwrf_uri(uri)
            if uri_info['dataset'] != self.uri:
                return None

            subset = self.uri_subsets.get(uri_info['subset'], None)
            if subset is None:
                return None

            # (Former `if res == {}` branch removed: it returned res either way.)
            return subset.get_data(uri, request=request, *args, **kwargs)
        except GetDataError as e:
            raise e
        except Exception as e:
            raise GetDataError(u'[%s] %s' % (type(e), unicode(e)))

    @protect_hack
    def get_file(self, uri, dest, request=None, *args, **kwargs):
        """Route a file-download request to the subset named in the URI.

        Raises GetFileError on failure; a GetFileError raised by the subset
        is re-raised as-is instead of being wrapped a second time (matches
        get_data's handling of GetDataError).
        """
        try:
            uri_info = parse_fgwrf_uri(uri)
            subset = self.uri_subsets.get(uri_info['subset'], None)
            if subset is None:
                raise ValueError("No such subset in WRF: %s" % uri_info['subset'])

            getres = subset.get_file(uri, dest, request=request, uri_info=uri_info, *args, **kwargs)
            return getres
        except GetFileError as e:
            raise e
        except Exception as e:
            raise GetFileError(u'[%s] %s' % (type(e), unicode(e)))
