# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import os
import re
from datetime import datetime, timedelta


def read_micaps_3(fname, limit=None):
    """
    Read a MICAPS type-3 file (general scatter point data).

    File layout (whitespace-separated tokens):
    "diamond 3 <description>", date/time, level, number of contour
    values followed by the values themselves, smoothing and bold-line
    coefficients, an optional clipping boundary given as a count of
    (lon, lat) pairs, then the station records
    (ID, lon, lat, alt, element values).

    When surface elements are plotted, the "level" field acts as a
    plotting-format flag instead of a vertical level:
        -1  6-hour precipitation (per the MICAPS 3.2 extension, 0.0
            denotes trace precipitation; operationally this reader
            keeps 0.0 as "no precipitation" and only treats 9999 as
            missing — using negative values or 9999 for zero rain
            would corrupt later analysis).
        -2  24-hour precipitation.
        -3  temperature (integers only).

    :param fname: micaps file name.
    :param limit: region limit, [min_lat, min_lon, max_lat, max_lon]
    :return: pandas.DataFrame with columns ID, lon, lat, alt,
             Var0..VarN, time, level; None if the file is missing,
             unreadable, or the region cut leaves no records.

    :Examples:
    >>> data = read_micaps_3('Z:/data/surface/jiany_rr/r20/17032908.000')

    """

    # check file exist
    if not os.path.isfile(fname):
        return None

    # read contents; MICAPS text files are commonly GBK encoded
    try:
        with open(fname, 'r', encoding='GBK') as f:
            txt = f.read().replace('\n', ' ').split()
    except Exception:
        txt = None
    if txt is None:
        return None

    # date and time (2-digit years are assumed to be 20xx)
    year = int(txt[3]) if len(txt[3]) == 4 else int(txt[3]) + 2000
    month = int(txt[4])
    day = int(txt[5])
    hour = int(txt[6])
    time = datetime(year, month, day, hour)

    # level (or the plotting-format flag described in the docstring)
    level = int(txt[7])

    # contour information: n_contour values follow the count.
    # Slice end is exclusive, so pt + n_contour keeps every value
    # (pt:(pt + n_contour - 1) would silently drop the last one).
    n_contour = int(txt[8])
    pt = 9
    if n_contour > 0:
        contours = np.array(txt[pt:(pt + n_contour)], dtype=float)
        pt += n_contour

    # smoothing and bold-line coefficients (parsed to advance pt)
    smooth_ceof = float(txt[pt])
    pt += 1
    bold_ceof = float(txt[pt])
    pt += 1

    # clipping boundary: n_bound (lon, lat) pairs follow the count
    n_bound = int(txt[pt])
    pt += 1
    if n_bound > 0:
        bound = np.array(txt[pt:(pt + 2 * n_bound)], dtype=float)
        pt += 2 * n_bound

    # number of elements per station, then number of stations
    n_elements = int(txt[pt])
    pt += 1
    number = int(txt[pt])
    pt += 1

    # cut the station records; slice the exact count so trailing junk
    # tokens cannot break the reshape
    records = np.array(txt[pt:pt + number * (n_elements + 4)])
    records.shape = [number, n_elements + 4]

    # build the frame
    columns = ['ID', 'lon', 'lat', 'alt']
    columns.extend(['Var%s' % x for x in range(n_elements)])
    data = pd.DataFrame(records, columns=columns)

    # convert column types; 9999 marks missing values.
    # Assign the masked series back instead of mask(..., inplace=True):
    # chained assignment does not propagate under pandas copy-on-write.
    for column in data.columns:
        if column == 'ID':
            continue
        data[column] = pd.to_numeric(data[column], errors="coerce")
        data[column] = data[column].mask(data[column] == 9999.0)

    # cut the region; copy so the later column assignments do not
    # write into a view of the original frame
    if limit is not None:
        data = data[(limit[0] <= data['lat']) & (data['lat'] <= limit[2]) &
                    (limit[1] <= data['lon']) &
                    (data['lon'] <= limit[3])].copy()

    # check records
    if len(data) == 0:
        return None

    # add time and level
    data['time'] = time
    data['level'] = level

    # return
    return data


def read_micaps_4(fname, scale_off=None, get_header=False):
    """
    Read a MICAPS type-4 file (regular grid).

    :param fname: micaps file name.
    :param scale_off: [scale, offset], return values = values*scale + offset.
    :param get_header: if True, also return the raw text header
                       (the lines holding the first 22 tokens).
    :return: 2-D numpy array of shape (nlat, nlon) with latitude rows
             ascending; (header, data) when get_header is True;
             None if the file is missing or unreadable.

    """

    # check file exist
    if not os.path.isfile(fname):
        return None

    # read contents, trying the encodings commonly used for MICAPS files
    contents = None
    for encoding in ('utf-8', 'GBK'):
        try:
            with open(fname, 'r', encoding=encoding) as f:
                contents = f.read()
            break
        except Exception:
            contents = None

    if contents is None:
        print("Micaps 4 file error: " + fname)
        return None

    # optionally capture the raw text header: the leading lines that
    # together hold the 22 header tokens
    header = None
    if get_header:
        token_count = 0
        head_lines = []
        for line in contents.split('\n'):
            token_count += len(line.split())
            if token_count <= 22:
                head_lines.append(line)
            else:
                header = '\n'.join(head_lines).rstrip()
                break

    txt = contents.split()

    # date and time (2-digit years are assumed to be 20xx)
    year = int(txt[3]) if len(txt[3]) == 4 else int(txt[3]) + 2000
    month = int(txt[4])
    day = int(txt[5])
    hour = int(txt[6])
    fhour = int(txt[7])
    if hour >= 24:  # some files swap hour / forecast hour in the head
        hour, fhour = fhour, hour
    # construct the time as a sanity check on the header fields
    # (raises ValueError on a corrupt date, same as the legacy reader)
    _valid_time = datetime(year, month, day, hour) + timedelta(hours=fhour)

    # grid information
    xint = float(txt[9])    # lon grid spacing
    yint = float(txt[10])   # lat grid spacing; negative => north-to-south
    nlon = int(txt[15])
    nlat = int(txt[16])

    # extract data; slice exactly nlat*nlon values so trailing junk
    # tokens cannot break the reshape.  Use float (not the removed
    # numpy 1.20+ alias np.float).
    data = np.array(txt[22:22 + nlat * nlon], dtype=np.float64)
    data.shape = [nlat, nlon]

    # make latitude ascending when the file stores rows north-to-south
    if yint < 0:
        data = data[::-1, :]
        if get_header and header is not None:
            # keep the returned header consistent with the flipped rows.
            # NOTE(review): matching relies on str(float) reproducing the
            # file's number formatting — confirm for unusual grids.
            pattern = re.escape(str(xint)) + ' *' + re.escape(str(yint))
            header = re.sub(pattern, f'{xint} {-yint}', header)

    # scale and offset the data, if necessary.
    if scale_off is not None:
        data = data * scale_off[0] + scale_off[1]

    if get_header:
        return header, data
    else:
        return data


def write_micaps_4(fname, data, header, encoding='GBK'):
    """
    Write a 2-D grid to a MICAPS type-4 text file.

    :param fname: output file name.
    :param data: 2-D array of grid values, written as ' %.2f' columns.
    :param header: text header placed before the values; trailing
                   whitespace is stripped.
    :param encoding: output text encoding, default 'GBK'.
    """
    trimmed_header = header.rstrip()
    np.savetxt(fname, data, fmt=' %.2f', header=trimmed_header,
               comments='', encoding=encoding)


if __name__ == '__main__':
    # Example: read a 24h maximum-temperature grid, converting deg C to K.
    grid = read_micaps_4('/home/spd/datasets/guangxi/TMAX24/17083108.024', scale_off=[1, 273.15])
    # write_micaps_4('tt1.000', grid, header)
