from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import h5py
from os import path
from scipy.signal import windows
from torch.utils.data import Dataset, dataloader
import random
# Allow running both as a package module (src.utils) and as a script from
# the repository root (utils).  Catch only ImportError: a bare `except:`
# would also hide genuine errors raised while importing utils itself.
try:
    from utils import *
except ImportError:
    from src.utils import *


class PPDataset(Dataset):
    """Reader for the DiTing seismic dataset stored in HDF5 part files.

    Each catalog row references a 3-component, 9000-sample waveform in one of
    the ``DiTing330km_part_{i}.hdf5`` files.  Depending on the selected
    method the waveform is normalized (and optionally random-cropped and/or
    differentiated) and returned with a P/S phase-pick label array.

    Args:
        dataset_path: directory containing the HDF5 part files and the
            catalog CSV, e.g. ``"F:/RealSeisData/DiTing/Diting50hz/"``.
        dataset_csv: catalog CSV filename, relative to ``dataset_path``.
        methodmame: preprocessing method, one of ``'fcy1'``, ``'fcy2'``,
            ``'fcy3'`` (parameter name kept misspelled for backward
            compatibility with existing callers).
        columns_useful: catalog columns to load; defaults to the pick/SNR
            columns used by the getitem methods.
        dlen: crop length used by methods ``'fcy2'`` and ``'fcy3'``.

    Raises:
        ValueError: if ``methodmame`` is not a recognized method name.
    """

    # Number of HDF5 part files the DiTing dataset is split into.
    N_PARTS = 28

    def __init__(self, dataset_path, dataset_csv, methodmame="fcy1",
                 columns_useful=None, dlen=6000) -> None:
        self.methodname = methodmame
        self.dataset_path = dataset_path
        self.dlen = dlen
        # Reusable read buffer, (samples, channels) as stored on disk.
        # NOTE(review): shared buffer — not safe for multi-threaded access
        # to __getitem__ on the same dataset instance.
        self.data_raw = np.empty((9000, 3), dtype=np.float64)
        if columns_useful is None:
            columns_useful = ['part', 'key', 'p_pick', 's_pick',
                              'st_mag', 'Z_P_power_snr', 'Z_S_power_snr', ]
        self.csv_path = path.join(dataset_path, dataset_csv)
        self.catalog = pd.read_csv(self.csv_path, usecols=columns_useful, dtype={
                                   'part': int, 'key': str, 'st_mag': float})
        self.datanum = self.catalog.shape[0]
        # Resolve the preprocessing method once, up front; failing here is
        # clearer than the AttributeError the first __getitem__ would raise.
        dispatch = {
            'fcy1': self.getitem_fcy1,
            'fcy2': self.getitem_fcy2,
            'fcy3': self.getitem_fcy3,
        }
        try:
            self.getitem = dispatch[self.methodname]
        except KeyError:
            raise ValueError(
                "unknown method name: {!r}".format(self.methodname)) from None

    def open_hdf5(self):
        """Open every HDF5 part file; handles stay open for the dataset's life."""
        self.hdf5list = []
        for i in range(self.N_PARTS):
            fname = "DiTing330km_part_{}.hdf5".format(i)
            hdf5_path = path.join(self.dataset_path, fname)
            # 64 MiB chunk cache per file to speed up repeated random reads.
            # TODO accelerate
            self.hdf5list.append(
                h5py.File(hdf5_path, "r", "stdio", rdcc_nbytes=1024 * 1024 * 64))

    def __len__(self):
        return self.datanum

    def get_catalog(self):
        """Return the full catalog DataFrame."""
        return self.catalog

    def _load_sample(self, idx):
        """Read catalog row *idx* and its waveform as float32 (3, 9000).

        Returns:
            tuple: (catalog row, waveform array of shape (channels, samples)).
        """
        if not hasattr(self, 'hdf5list'):
            # Open the files lazily on first access so the dataset object
            # pickles cleanly into DataLoader worker processes.
            self.open_hdf5()
        info = self.catalog.iloc[idx]
        f = self.hdf5list[info['part']]
        # Keys look like "<event>.<suffix>"; restore the zero padding that
        # was lost when the key was round-tripped through the CSV.
        key_correct = str(info['key']).split('.')
        key = key_correct[0].rjust(6, '0') + '.' + key_correct[1].ljust(4, '0')
        f['earthquake/' + key].read_direct(self.data_raw)
        return info, self.data_raw.transpose().astype(np.float32)

    def getitem_fcy1(self, idx):
        """Full 9000-sample waveform, normalized; Gaussian pick labels."""
        info, data = self._load_sample(idx)
        data = preprocess(data)
        label = gen_gaussianlabel(
            info['p_pick'], info['s_pick'], data.shape[1])
        return data, label

    def getitem_fcy2(self, idx):
        """Random crop to ``dlen`` samples, normalized; triangular pick labels."""
        info, data = self._load_sample(idx)
        data, p_arr, s_arr, _ = crop_data(
            data, info['p_pick'], info['s_pick'], self.dlen)
        data = preprocess(data)
        label = gen_trilabel(p_arr, s_arr, data.shape[1])
        return data, label

    def getitem_fcy3(self, idx):
        """Like ``fcy2`` but differentiated along time to approximate
        acceleration (up to the sampling-rate factor)."""
        info, data = self._load_sample(idx)
        data, p_arr, s_arr, _ = crop_data(
            data, info['p_pick'], info['s_pick'], self.dlen)
        data = preprocess(data)
        data = np.diff(data, axis=1, prepend=0)
        label = gen_trilabel(p_arr, s_arr, data.shape[1])
        return data, label

    def __getitem__(self, index):
        return self.getitem(index)


# Gaussian pick-label windows: M2-sample windows centred on the arrival.
M2 = 41
std_p = 6
std_s = 6
w_p = windows.gaussian(M2, std_p)
w_s = windows.gaussian(M2, std_s)
# Narrower, lower-amplitude windows used when P and S arrive close together,
# so the two labels do not overlap too strongly.
w_p_small = windows.gaussian(M2, std_p/2)*0.8
w_s_small = windows.gaussian(M2, std_s/2)*0.8

# Short aliases (kept: other modules may import them via `from ... import *`).
npconcat = np.concatenate
npzeros = np.zeros
npones = np.ones


def _place_window(window, start, width):
    """Place *window* into a zero array of length *width* at *start*,
    clipping any part that falls outside [0, width)."""
    label = npzeros(width)
    lo = max(start, 0)
    hi = min(start + M2, width)
    if hi > lo:
        label[lo:hi] = window[lo - start:hi - start]
    return label


def gen_gaussianlabel(p_arr, s_arr, width):
    """Build Gaussian phase-pick labels: a Gaussian window centred at each
    arrival sample, zeros elsewhere.

    When the two arrivals are closer than ``M2/2`` samples, narrower windows
    scaled by 0.8 are used to limit overlap.  Windows that would extend past
    either edge of the trace are clipped instead of raising.

    Args:
        p_arr: P-wave arrival (sample index).
        s_arr: S-wave arrival (sample index).
        width: label length along time.

    Returns:
        np.ndarray of shape (2, width): [P label, S label].
    """
    l_p = int(p_arr - (M2 - 1) / 2)
    l_s = int(s_arr - (M2 - 1) / 2)
    if s_arr - p_arr > M2 / 2:
        wp, ws = w_p, w_s
    else:
        # Narrower, lower pick label for close arrivals.
        wp, ws = w_p_small, w_s_small
    label_p2 = _place_window(wp, l_p, width)
    label_s2 = _place_window(ws, l_s, width)
    return np.array([label_p2, label_s2])


# Triangular pick-label window: tri_width samples, peak 1.0 at the arrival.
tri_width = 51
half_tri_width = int((tri_width-1)/2)
win_tri = 1-np.abs(np.linspace(-1, 1, tri_width))


def gen_trilabel(p_arr, s_arr, width):
    """Build triangular phase-pick labels: a triangle window centred at each
    arrival sample, zeros elsewhere.

    Args:
        p_arr: P-wave arrival (sample index; floats are truncated).
        s_arr: S-wave arrival (sample index; floats are truncated).
        width: label length along time.

    Returns:
        np.ndarray of shape (2, width): [P label, S label].
    """
    # int() guards against float picks coming from the catalog CSV —
    # slicing an ndarray with float bounds raises TypeError.
    l_p = int(p_arr) - half_tri_width
    l_s = int(s_arr) - half_tri_width
    label_p = np.zeros(width)
    label_p[l_p: l_p + tri_width] = win_tri
    label_s = np.zeros(width)
    label_s[l_s: l_s + tri_width] = win_tri
    return np.array([label_p, label_s])


def gen_trilabel_snrheight(p_arr, s_arr, width, wp, ws):
    """Triangular pick labels with per-phase peak heights *wp* and *ws*
    (e.g. SNR-derived weights).  Same layout as :func:`gen_trilabel`."""
    l_p = int(p_arr) - half_tri_width
    l_s = int(s_arr) - half_tri_width
    label_p = np.zeros(width)
    label_p[l_p: l_p + tri_width] = win_tri * wp
    label_s = np.zeros(width)
    label_s[l_s: l_s + tri_width] = win_tri * ws
    return np.array([label_p, label_s])



def crop_data(data_raw, p_arr, s_arr, crop_size=4032):
    """Randomly crop a ``crop_size``-sample window that contains both picks.

    ``data_raw`` has shape (channels, samples), e.g. (3, 9000).  The start
    index is drawn uniformly from the range that keeps both arrivals (with a
    ``half_tri_width`` label margin) inside the window, so the crop differs
    on every call.

    Returns:
        tuple: (cropped data, P pick relative to window, S pick relative to
        window, window start index).
    """
    n_samples = data_raw.shape[1]
    # Earliest start that still keeps the S pick (plus margin) in the window.
    lo = max(0, s_arr - crop_size + half_tri_width) + 5
    # Latest start that keeps the P pick (plus margin) inside the window and
    # the window itself inside the trace.
    hi = min(p_arr - half_tri_width, n_samples - crop_size) - 1
    wstart = int(random.uniform(lo, hi))
    window = data_raw[:, wstart:wstart + crop_size]
    return window, p_arr - wstart, s_arr - wstart, wstart


if __name__ == "__main__":
    # Manual smoke test: requires a local copy of the DiTing dataset at the
    # path below; not runnable elsewhere.
    dataset_path = "E:/RealSeisData/Diting50hz/"
    dataset = "DiTing330km_test.csv"
    ppd = PPDataset(dataset_path, dataset, "fcy3")
    d = ppd.getitem(22)
    print(ppd.__len__())
    # Exercise label generation + cropping on a synthetic pick pair.
    parr = 4500
    sarr = 4750
    label = gen_gaussianlabel(parr, sarr, 9000)
    label_crop, p_arr, s_arr, wstart = crop_data(label, parr, sarr)
    print(p_arr, s_arr, wstart)
    # plot3C(ppd.__getitem__(12)[0])
    # plot3C(ppd.__getitem__(12)[1])
    exit()
    # NOTE(review): everything below is unreachable because of exit() above;
    # kept as a quick plotting snippet for label inspection.
    l = gen_gaussianlabel(100, 280, 300)
    # print(l)
    plt.plot(l[0])
    plt.plot(l[1])
    plt.show()
