# import os
import torch
import torch.utils.data as data
import numpy as np
from torch.utils.data import DataLoader
from utils.tools import get_time
from utils import metrics
from utils.read_micaps import read_micaps_4
from utils.config import *
from os.path import basename, join as pathjoin, isfile


def make_dataset(Tcategory, mode, timesplit=True, htime='08'):
    """Build the list of (scmoc_path, cldas_path) sample pairs for one split.

    Reads the split's listing file, filters entries by issue hour, derives the
    matching CLDAS ``.npy`` path for each forecast file, and keeps only pairs
    where both files exist on disk.

    :param Tcategory: predict type of temperature (e.g. 'TMAX' / 'TMIN')
    :type Tcategory: str
    :param mode: dataset split name, used to pick the listing file ('train'/'val')
    :type mode: str
    :param timesplit: when True, keep only samples issued at hour ``htime``
    :type timesplit: bool
    :param htime: two-digit issue hour to keep when ``timesplit`` is on
    :type htime: str
    :return: list of (scmoc_path, cldas_path) tuples
    :rtype: list[tuple[str, str]]
    """
    tensors = []
    txtpath = f"{txt_path}/{Tcategory}/{Tcategory}_{mode.lower()}.txt"
    # Use a context manager so the listing file handle is closed deterministically
    # (the original `open(...).readlines()` leaked the handle).
    with open(txtpath, 'r') as listing:
        txtList = listing.readlines()

    for data_path in txtList:
        scmoc_path = data_path.strip()
        data_name_split = basename(scmoc_path).split('.')
        # File names look like "<YYMMDDHH>.<step>..." — prepend the century to the
        # timestamp and parse the forecast lead time in hours.
        data_time, hourstep = f"20{data_name_split[0]}", int(data_name_split[1])
        # Keep only samples issued at the requested hour (chars 8:10 of YYYYMMDDHH).
        if timesplit and data_time[8:10] != htime:
            continue

        fst_time = get_time(data_time, hours=hourstep)
        cldas_path = pathjoin(cldas_npy_path, Tcategory[:4].upper(), fst_time[2:10] + '.npy')

        # `and` instead of the original `*`: expresses boolean intent and
        # short-circuits the second stat call when the first file is missing.
        if isfile(scmoc_path) and isfile(cldas_path):
            tensors.append((scmoc_path, cldas_path))

    return tensors


def data_loader(scmoc_path, cldas_path):
    """Load one sample pair as torch tensors.

    Reads the SCMOC forecast grid and the CLDAS ground-truth array, crops both
    to (width, height), and returns them as tensors with a leading channel dim.
    Returns ``None`` on known read failures so callers can skip the sample.

    :param scmoc_path: path to the MICAPS-4 forecast file
    :param cldas_path: path to the CLDAS ``.npy`` ground-truth file
    :return: (input_tensor, target_tensor) or None on error
    """
    try:
        forecast = read_micaps_4(scmoc_path, scale_off=[1, C_K])[:width, :height]
        truth = np.load(cldas_path)[:width, :height]
        # unsqueeze(0) adds a channel axis expected by the model.
        return torch.from_numpy(forecast).unsqueeze(0), torch.from_numpy(truth).unsqueeze(0)
    except ValueError:
        print("SCMOC DATA ERROR :", basename(scmoc_path))
    except AttributeError:
        print("AttributeError:", basename(scmoc_path))
    return None


class SegmentedData(data.Dataset):
    """Dataset of SCMOC forecast grids paired with their CLDAS ground truth.

    The sample index is built once at construction time; actual file reading
    is delegated to ``loader`` on each access.

    :param Tcategory: predict type of temperature
    :type Tcategory: str
    :param mode: train/val mode
    :type mode: str
    :param timesplit: forwarded to ``make_dataset`` to filter by issue hour
    :param htime: issue hour used when ``timesplit`` is enabled
    :param loader: callable(input_path, target_path) -> sample
    :type loader: function
    """

    def __init__(self, Tcategory, mode, timesplit=True, htime='08', loader=data_loader):
        # List of (input_path, target_path) pairs for this split.
        self.tensors = make_dataset(Tcategory, mode, timesplit, htime)
        self.loader = loader

    def __getitem__(self, index):
        # Resolve the sample's paths, then delegate reading to the loader.
        input_path, target_path = self.tensors[index]
        return self.loader(input_path, target_path)

    def __len__(self):
        return len(self.tensors)


if __name__ == '__main__':
    # Smoke test: iterate the validation split and print the per-batch MAE
    # between forecast input and CLDAS ground truth.
    category = 'TMAX'
    # category = 'TMIN'
    data_obj_val = SegmentedData(Tcategory=category, mode='val', htime='08')
    val_generator = DataLoader(data_obj_val, batch_size=5, shuffle=False, num_workers=20, pin_memory=True)
    data_len_val = len(data_obj_val)
    print("数据样本数：", data_len_val)

    # `batch`, not `data`: the original loop variable shadowed both the builtin
    # and the `torch.utils.data as data` module alias imported at the top.
    for batch in val_generator:
        # Default collate never yields None; kept as a guard in case a custom
        # collate_fn passes failed samples through.
        if batch is None:
            continue
        x, y = batch
        # Skip physically implausible inputs (> 323 — presumably Kelvin ≈ 50°C;
        # TODO confirm the unit against read_micaps_4's scale_off).
        if x.max() > 323:
            continue

        maex = metrics.mae(x.numpy().squeeze(), y.numpy().squeeze())
        print(maex)
