# -*- coding: utf-8 -*-
"""MICAPS data can be read directly, but it contains erroneous records; the data must be cleaned before building the datasets."""
import random
from glob import glob
from os.path import dirname, basename, isfile, join
from pygrib import open as pyopen
import numpy as np
from utils.read_micaps import read_micaps_3, read_micaps_4
from utils.tools import get_time
from utils.config import *
from multiprocessing import Pool, cpu_count
from datetime import datetime, timedelta

# Worker count for the multiprocessing pools: 70% of available CPUs unless a
# fixed core_count is configured (core_count == 0 means "auto").
processCnt = int(cpu_count() * 0.7) if core_count == 0 else core_count

# Month-stamped log file; Logger rotates daily ('D') and keeps 30 backups.
datasets_logger = Logger(filename=f'{logpath}/dataset_{datetime.now().year}-{datetime.now().month}.log', level='info',
                         when='D', back_count=30).logger


class Datasets:
    """Build SCMOC/CLDAS temperature datasets for training and validation.

    MICAPS SCMOC forecast files are range-screened and indexed into
    ``<cls>_all.txt`` / ``<cls>_train.txt`` / ``<cls>_val.txt``, while CLDAS
    GRIB2 analyses are (optionally station-revised and) saved as ``.npy``
    label arrays. All paths, thresholds and window sizes come from
    ``utils.config``.
    """

    def __init__(self, dataEndTime=currentTime, dataDateCnts=datadays, valDataDateCnts=valdays):
        """Compute the date windows used by every filtering step.

        :param dataEndTime: datetime marking the (exclusive) end of the data window
        :param dataDateCnts: number of days of data to use overall
        :param valDataDateCnts: number of most-recent days reserved for validation
        """
        datasets_logger.info(f"Data End Time:{dataEndTime}")
        datasets_logger.info(f'Data Days:{dataDateCnts}')
        datasets_logger.info(f'AUX Val Data Days:{valDataDateCnts}')
        self.dataEndTime = dataEndTime
        self.dataDateCnts = dataDateCnts
        self.valDataDateCnts = valDataDateCnts
        # Auxiliary validation window roughly one year back (same season).
        # NOTE(review): the asymmetric 0.5x / 1.5x widths look intentional but
        # are undocumented -- confirm with the original author.
        self.last_valDate_start = (self.dataEndTime - timedelta(days=365 + int(self.valDataDateCnts / 2))).strftime(
            '%Y%m%d')
        self.last_valDate_end = (self.dataEndTime - timedelta(days=365 - int(self.valDataDateCnts * 1.5))).strftime(
            '%Y%m%d')
        self.dataStartDate = (self.dataEndTime - timedelta(days=self.dataDateCnts)).strftime('%Y%m%d')
        self.valData_StartDate = (self.dataEndTime - timedelta(days=self.valDataDateCnts)).strftime('%Y%m%d')
        self.dataEndDate = self.dataEndTime.strftime('%Y%m%d')
        # First-ever training run screens everything; incremental runs only
        # re-read files newer than the validation window start.
        self.filter_StartDate = self.dataStartDate if initTrain else self.valData_StartDate

        # All calendar years touched by the data window.
        self.yearSteps = list(range(int(self.dataStartDate[:4]), int(self.dataEndDate[:4]) + 1))

    def make_allTxt(self, file):
        """Screen one SCMOC file and append it to ``<cls>_all.txt`` if valid.

        Files whose temperature falls outside [Tem_min, Tem_max] are recorded
        in a per-category error blacklist and skipped on later runs. Runs in
        pool workers, so both text files are opened in append mode.
        """
        T_cls = basename(dirname(file))

        error_TxtFile = f'{txt_path}/{T_cls}/{T_cls}_error_data.txt'
        # 'a+' creates the blacklist on first use; seek(0) lets us read the
        # previously recorded bad files before appending new entries.
        # (fix: the handle previously leaked on the early returns below)
        with open(error_TxtFile, 'a+') as error_dataTxt, \
                open(join(txt_path, T_cls, f"{T_cls}_all.txt"), 'a') as allTxt:
            error_dataTxt.seek(0)
            error_data_list = [d.strip() for d in error_dataTxt.readlines()]

            if basename(file) in error_data_list:
                return

            # File name is '<yymmddhh>.<forecast step>'.
            datatime, hstep = basename(file).split('.')
            data_date = '20' + datatime[:6]
            fcst_date = get_time('20' + datatime, hours=hstep)[:8]

            # Drop records whose issue or valid date falls outside the window.
            if int(data_date) < int(self.dataStartDate) or int(fcst_date) >= int(self.dataEndDate):
                return

            hstep = int(hstep)
            if not (stepStart <= hstep <= stepEnd):
                return

            if int(data_date) < int(self.filter_StartDate):
                # Older data was screened on a previous run; accept without
                # re-reading the file.
                allTxt.write(file + '\n')
                return

            try:
                data = read_micaps_4(file)
                if data.max() > Tem_max or data.min() < Tem_min:
                    # Not yet blacklisted (the early return above filtered
                    # known-bad files), so record it now.
                    errmsg = f"SCMOC DATA ERROR :{file}, MAX:{data.max()} MIN:{data.min()}"
                    datasets_logger.error(errmsg)
                    error_dataTxt.write(basename(file) + '\n')
                else:
                    allTxt.write(file + '\n')
            except AttributeError:
                # read_micaps_4 presumably returns None for unreadable files,
                # making .max() raise -- TODO confirm against utils.read_micaps.
                pass

    def mk_cldas(self, cfile):
        """Convert one CLDAS GRIB2 file into a ``.npy`` label array.

        Optionally revises the gridded analysis with national station
        observations (revise_cldas / revise_points) before saving.
        """
        # CLDAS product code -> [output category dir, GRIB message name]
        cldas_categorys = {'MXT': ['TMAX', 'Maximum temperature'],
                           'MNT': ['TMIN', 'Minimum temperature'],
                           'TEM': ['TEM03', '2 metre temperature'],
                           }

        fname_split = basename(cfile).split('-')
        cldasTime = fname_split[-1].split('.')[0]
        Tc = fname_split[-2]

        category = cldas_categorys[Tc]

        Tcategory = category[0]

        grbName = category[1]
        # Hourly TEM analyses are only kept on the 3-hourly cycle 02,05,...,23.
        # (fix: dtype=int -- np.int was removed in NumPy 1.24)
        if Tcategory == 'TEM03' and (int(cldasTime[-2:]) not in np.linspace(2, 23, 8, dtype=int)):
            return

        cldas_file = f"{cldas_npy_path}/{Tcategory}/{cldasTime[2:]}.npy"

        # Skip files already converted before the last recorded training date.
        if isfile(cldas_file) and int(cldasTime[:8]) < config['model info'].getint('last_train_date'):
            past_data = True
        else:
            past_data = False

        if not past_data and int(self.dataStartDate) < int(cldasTime[:8]) <= int(self.dataEndTime.strftime('%Y%m%d')):

            grbs = pyopen(cfile)
            # fix: close the GRIB handle on every path, including early returns
            try:
                grbs.seek(0)
                try:
                    grb = grbs.select(name=grbName)[0]
                except ValueError:
                    datasets_logger.error(f"ERROR GRIB FILE: {cfile}")
                    return

                area_data, lats, lons = grb.data(lat1=latmin, lat2=latmax, lon1=lonmin, lon2=lonmax)

                # Normalize row order to south-to-north.
                if lats[0, 0] > lats[1, 0]:
                    area_data = area_data[::-1, :]
                    datasets_logger.info("Latitude Switch !")

                # Filter abnormal files: data is in Kelvin, thresholds in Celsius.
                if (area_data.max() - C_K) > Tem_max or (area_data.min() - C_K) < Tem_min:

                    datasets_logger.error(
                        f"ERROR GRIB DATA:{cfile}\n{grb}\nTMAX={round(area_data.max())}, TMIN={round(area_data.min())}")
                    return

                if revise_cldas:
                    # Map the output category to the station-file naming scheme.
                    sfname = Tcategory[1:4] + '_24H' if Tcategory in ['TMAX', 'TMIN'] else 'TEM'
                    station_file = f'{station_micaps_path}/micaps{cldasTime[:4]}/micaps{cldasTime[:6]}/SURFACE/TMP_{sfname}_NATIONAL/{cldasTime}0000.000'
                    if not isfile(station_file):
                        datasets_logger.error(f"No Station File:{station_file}")

                    else:
                        station_data = read_micaps_3(station_file)

                        if station_data is None:
                            datasets_logger.error(f"Station File Error:{station_file}")

                        else:
                            for i, sid in enumerate(station_data.ID):

                                if sid not in National_station_ids:
                                    continue

                                Latitude = station_data.lat[i]
                                Longitude = station_data.lon[i]
                                TEM = station_data.Var0[i]
                                if not Tem_min < TEM < Tem_max:
                                    datasets_logger.error(
                                        f"Station Point Data Error:STATION ID:{sid}, Tem:{TEM}, file:{basename(station_file)}")
                                    # fix: skip the bad observation instead of
                                    # burning it into the analysis grid
                                    continue
                                STATION_TEM = TEM + C_K

                                # Nearest grid cell for the station location.
                                # NOTE(review): latitude indexes the first axis
                                # but is bounds-checked against area_width --
                                # confirm area_width is the row count in config.
                                x_axis = int((Latitude - latmin) / grid + 0.5)
                                y_axis = int((Longitude - lonmin) / grid + 0.5)
                                if x_axis >= area_width or y_axis >= area_height or x_axis < 0 or y_axis < 0:
                                    continue

                                if revise_points:
                                    # Spread the station-minus-grid difference over a
                                    # (2*revise_points+1)^2 neighborhood, clipped to the grid.
                                    diff = STATION_TEM - area_data[x_axis, y_axis]
                                    w0 = max(0, x_axis - revise_points)
                                    w1 = min(x_axis + revise_points + 1, area_width)
                                    h0 = max(0, y_axis - revise_points)
                                    h1 = min(y_axis + revise_points + 1, area_height)
                                    area_data[w0:w1, h0:h1] += diff

                                area_data[x_axis, y_axis] = STATION_TEM

                            datasets_logger.info(f"CLDAS {Tcategory} {cldasTime[2:]} REVISED SUCCESS")

                np.save(cldas_file, area_data.astype(np.float32))
                datasets_logger.info(f"SAVE CLDAS DATA:{cldas_file}")
            finally:
                grbs.close()

    def data_split(self):
        """Build ``<cls>_all.txt`` via multiprocess screening, then split into train/val lists.

        The validation set is a fixed-seed random sample plus two deterministic
        windows: the same season one year back and the most recent days (these
        two windows go to BOTH train and val).
        """
        datasets_logger.info("Data Filtering ...")

        for T_cls in Tcategorys:
            # Truncate the accumulated list before pool workers append to it.
            with open(join(txt_path, T_cls, f"{T_cls}_all.txt"), 'w') as f:
                f.truncate()
            scmoc_dir = f"{scmoc_Micaps_path}/{T_cls}"
            scmoc_files = []
            # NOTE: the f-string embeds the list repr of htimes, which glob
            # interprets as a character class (matches a single character from
            # the repr), not the hour strings themselves -- kept as-is.
            if len(self.yearSteps) > 1:

                # Partial first year: from the window's start month to December.
                for st_mon in [str(m).rjust(2, '0') for m in range(int(self.dataStartDate[4:6]), 13)]:
                    scmoc_files += glob(
                        f"{scmoc_dir}/{str(self.yearSteps[0])[2:4]}{st_mon}*{[ht for ht in htimes]}.???")
                # Partial last year: January through the window's end month.
                for stp_mon in [str(m).rjust(2, '0') for m in range(1, int(self.dataEndDate[4:6]) + 1)]:
                    scmoc_files += glob(
                        f"{scmoc_dir}/{str(self.yearSteps[-1])[2:4]}{stp_mon}*{[ht for ht in htimes]}.???")

                # Whole years in between.
                if len(self.yearSteps) > 2:
                    for yearSt in self.yearSteps[1:-1]:
                        for mon in [str(m).rjust(2, '0') for m in range(1, 13)]:
                            scmoc_files += glob(f"{scmoc_dir}/{str(yearSt)[2:4]}{mon}*{[ht for ht in htimes]}.???")
            else:
                # Window fits inside a single calendar year.
                for _mon in [str(m).rjust(2, '0') for m in
                             range(int(self.dataStartDate[4:6]), int(self.dataEndDate[4:6]) + 1)]:
                    scmoc_files += sorted(glob(
                        f"{scmoc_dir}/{str(self.yearSteps[0])[2:4]}{_mon}*{[ht for ht in htimes]}.???"))
            scmoc_files = sorted(list(set(scmoc_files)))
            spool = Pool(processes=processCnt)
            spool.map(self.make_allTxt, scmoc_files, chunksize=1)
            spool.close()
            spool.join()

            # Workers appended out of order; rewrite the index sorted.
            with open(join(txt_path, T_cls, f"{T_cls}_all.txt")) as f:
                lines = sorted(f.readlines())
            with open(join(txt_path, T_cls, f"{T_cls}_all.txt"), 'w') as f:
                f.write(''.join(lines))

        datasets_logger.info("Data filtering completed")
        for T_cls in Tcategorys:
            # fix: close every file handle deterministically via `with`
            with open(join(txt_path, T_cls, f"{T_cls}_all.txt"), "r") as f:
                allfiles = f.readlines()

            # Fixed seed keeps the random split reproducible across runs.
            random.seed(2020)
            random.shuffle(allfiles)
            # fix: set membership is O(1) vs. the original O(n) list scan
            valSet = set(random.sample(allfiles, int(len(allfiles) * valrate)))
            allfiles.sort(key=lambda x: basename(x))

            train_cnt = 0
            val_cnt = 0
            with open(join(txt_path, T_cls, f"{T_cls}_train.txt"), "w") as trainTxt, \
                    open(join(txt_path, T_cls, f"{T_cls}_val.txt"), "w") as valTxt:
                for data_path in allfiles:
                    datatime = '20' + basename(data_path.strip()).split('.')[0]
                    fststep = int(basename(data_path.strip()).split('.')[1])
                    fsttime = get_time(datatime, hours=fststep)

                    # Drop records whose issue or valid time is outside the window.
                    if int(datatime[:8]) < int(self.dataStartDate) or int(fsttime[:10]) >= int(
                            self.dataEndTime.strftime('%Y%m%d%H')):
                        continue

                    if data_path in valSet:
                        val_cnt += 1
                        valTxt.write(data_path)
                    elif int(self.last_valDate_start) < int(fsttime[:8]) < int(self.last_valDate_end) \
                            or int(fsttime[:8]) >= int(self.valData_StartDate):
                        # Same-season-last-year and most-recent days feed BOTH sets.
                        val_cnt += 1
                        valTxt.write(data_path)
                        train_cnt += 1
                        trainTxt.write(data_path)
                    else:
                        train_cnt += 1
                        trainTxt.write(data_path)

            msg = f"{T_cls} VALID SCMOC DATA COUNT：{len(allfiles)}"
            datasets_logger.info(msg)

            if train_cnt * val_cnt == 0:
                datasets_logger.error("No Train/Val Data Count!")

    def build_cldas(self):
        """Collect CLDAS GRIB2 files for the data window and convert them to ``.npy``."""
        datasets_logger.info("MAKING CLDAS DATA ...")

        TM_FILES = []
        # NOTE: the embedded list repr of the category last letters acts as a
        # glob character class matching a single product letter.
        if len(self.yearSteps) > 1:
            # Partial first year: start month through December.
            for stmon in range(int(self.dataStartDate[4:6]), 13):
                TM_FILES += glob(
                    f"{cldas_grib2_path}/{self.yearSteps[0]}/{self.yearSteps[0]}{str(stmon).rjust(2, '0')}*/*P_CLDAS_RT_CHN_0P05_DAY-M{[tc[-1] for tc in Tcategorys]}T-20*.GRB2")
            # Partial last year: January through the end month.
            # (fix: start at 1 -- the original range(0, ...) also emitted a
            # useless month-'00' pattern)
            for stpmon in range(1, int(self.dataEndDate[4:6]) + 1):
                TM_FILES += glob(
                    f"{cldas_grib2_path}/{self.yearSteps[-1]}/{self.yearSteps[-1]}{str(stpmon).rjust(2, '0')}*/*P_CLDAS_RT_CHN_0P05_DAY-M{[tc[-1] for tc in Tcategorys]}T-20*.GRB2")
            # Whole years in between.
            if len(self.yearSteps) > 2:
                for year in self.yearSteps[1:-1]:
                    TM_FILES += glob(
                        f"{cldas_grib2_path}/{year}/*/*P_CLDAS_RT_CHN_0P05_DAY-M{[tc[-1] for tc in Tcategorys]}T-20*.GRB2")
        else:
            # Window fits inside a single calendar year.
            for tmon in range(int(self.dataStartDate[4:6]), int(self.dataEndDate[4:6]) + 1):
                TM_FILES += glob(
                    f"{cldas_grib2_path}/{self.dataStartDate[:4]}/{self.dataStartDate[:4]}{str(tmon).rjust(2, '0')}*/*P_CLDAS_RT_CHN_0P05_DAY-M{[tc[-1] for tc in Tcategorys]}T-20*.GRB2")

        TM_FILES = list(set(TM_FILES))
        # Newest first, keyed by the timestamp after the BABJ marker.
        TM_FILES.sort(key=lambda fname: basename(fname).split('Z_NAFP_C_BABJ_')[1], reverse=True)

        # Large backlogs are converted in parallel; small ones serially.
        if len(TM_FILES) and self.dataDateCnts > 10:
            cpool = Pool(processes=processCnt)
            cpool.map(self.mk_cldas, TM_FILES, chunksize=1)
            cpool.close()
            cpool.join()
        else:
            for file in TM_FILES:
                self.mk_cldas(file)

        datasets_logger.info("MAKING CLDAS DATA COMPLETED!")


if __name__ == '__main__':
    # Build the dataset indexes for the configured time window.
    Datasets(
        dataEndTime=currentTime,
        dataDateCnts=datadays,
        valDataDateCnts=valdays,
    )