"""
Create the negative (noise) dataset: stream windows that contain no catalogued event.
"""

from obspy.core import read
import os
import pandas as pd
import numpy as np
from tqdm import tqdm

from constant import CATALOG_BENZ_PATH, STREAM_PATH, WINDOW_SIZE, \
    NOISE_WINDOW_STEP, TRAIN_PATH, DATASET_PATH


def filter_catalog(cat, start_time, end_time):
    """Return the catalog rows whose ``utc_timestamp`` lies strictly
    inside the open interval (start_time, end_time)."""
    in_window = (cat.utc_timestamp > start_time) & (cat.utc_timestamp < end_time)
    return cat[in_window]


def skip_stream_file(file_name):
    """Return True when *file_name* should be skipped.

    Only streams recorded in months 2-9 of 2014 are kept (the original
    comment states the valid range is 2014-02 .. 2014-08; the code's own
    month test admits up to month 9, which is preserved here).
    File names look like ``GSOK027_2-2014.mseed`` -> date part ``2-2014``.

    Bug fixed: the original compared month/year as *strings*, so years
    before 2014 were wrongly accepted ('2013' > '2014' is False) and the
    month test relied on lexicographic ordering ('10' < '2').
    """
    date = file_name.split('_')[-1]
    date = date.split('.')[0]
    month, year = date.split('-')
    try:
        month, year = int(month), int(year)
    except ValueError:
        # Unparsable date component -> not a usable stream file.
        return True
    return not (year == 2014 and 2 <= month <= 9)


def main():
    """Slide a fixed-size window over every stream file and save the
    windows that contain no catalogued event as negative ("noise")
    samples, batched into .npy files of roughly slide_len/20 windows.

    NOTE(review): relies on non-standard obspy attributes -- the author
    notes ``stream.slide_len`` is a local modification; ``Trace.dataset``
    (stock obspy exposes ``Trace.data``) presumably is too -- confirm
    against the patched obspy in use.
    """
    stream_file_list = os.listdir(STREAM_PATH)
    cat_raw = pd.read_csv(CATALOG_BENZ_PATH)
    for stream_file in tqdm(stream_file_list):
        if skip_stream_file(stream_file):
            print("skip {}".format(stream_file))
            continue
        stream = read(os.path.join(STREAM_PATH, stream_file))
        # stream = read(r'G:\data\streams\GSOK027_2-2014.mseed')  # debug: single-file run
        start_time = stream[0].stats.starttime
        end_time = stream[-1].stats.endtime
        # Only events inside this stream's time span can collide with windows.
        cat = filter_catalog(cat_raw, start_time, end_time)
        cat_event_times = cat.utc_timestamp.values

        win_gen = stream.slide(window_length=WINDOW_SIZE,
                               step=NOISE_WINDOW_STEP,
                               include_partial_windows=False)

        data_npy = []
        process_bar = tqdm(total=100)  # progress bar; real total set on first window
        process_bar.set_description(stream_file)
        save_count = 0
        skip_count = 1
        for idx, win in enumerate(win_gen):
            if idx == 0:
                process_bar.total = stream.slide_len  # NOTE: slide_len is a local addition, not in stock obspy
                process_bar.refresh()
                # Flush accumulated windows to disk every ~5% of the stream.
                threshold = stream.slide_len // 20
            process_bar.update(1)
            win_start = win[0].stats.starttime.timestamp
            win_end = win[-1].stats.endtime.timestamp

            if len(win) < 3:  # 3 channels : NN1, NN2, NNE
                continue
            # Require at least 1000 samples on every channel (1000 kept below).
            if min(win[0].dataset.shape[0], win[1].dataset.shape[0]) < 1000 or win[2].dataset.shape[0] < 1000:
                continue
            # Reject windows overlapping any catalogued event time.
            after_start = cat_event_times > win_start
            before_end = cat_event_times < win_end
            cat_idx = np.where(after_start == before_end)[0]  # narray
            if cat_idx.shape[0] > 0:
                tqdm.write(
                    "skip time {} ~ {}, count is {}.".format(win[0].stats.starttime, win[-1].stats.endtime, skip_count))
                skip_count += 1
                continue

            # (3, 1000) float32 block: first 1000 samples of each channel.
            data = np.array([tr.dataset[:1000] for tr in win], dtype=np.float32)
            data_npy.append(data)

            if idx >= threshold:
                data_npy = np.array(data_npy, dtype=np.float32)
                # NOTE(review): hard-coded backslash makes this Windows-only;
                # also, windows collected after the final threshold flush are
                # discarded at loop end -- confirm this is intended.
                save_name = 'noise\\{}_{}.npy'.format(stream_file.split('.mseed')[0], save_count)
                save_path = os.path.join(TRAIN_PATH, save_name)
                np.save(save_path, data_npy)

                save_count += 1
                data_npy = []
                threshold = min(threshold + stream.slide_len // 20, stream.slide_len - 1)

        process_bar.close()


def noise2dataset():
    """Register every noise .npy file under TRAIN_PATH/noise in dataset.csv.

    Each file contributes one row with the same columns as the existing
    dataset: (NA id, file path, number of windows in the file, category 0
    == noise). The updated table is written back to DATASET_PATH.

    Fix: the original called ``pd.concat`` once per file, copying the
    whole growing DataFrame each iteration (accidental O(n^2)); rows are
    now collected in a list and concatenated once.
    """
    dataset = pd.read_csv(DATASET_PATH)
    columns = dataset.columns
    noise_path = os.path.join(TRAIN_PATH, 'noise')
    file_list = os.listdir(noise_path)
    new_rows = []
    with tqdm(total=len(file_list)) as process_bar:
        for file in file_list:
            process_bar.set_description(file)
            process_bar.update(1)
            data_path = os.path.join(noise_path, file)
            data = np.load(data_path)
            amount = data.shape[0]  # number of windows stored in this file
            category = 0  # 0 == noise class
            new_rows.append([pd.NA, data_path, amount, category])
    if new_rows:
        dataset = pd.concat(
            [dataset, pd.DataFrame(data=new_rows, columns=columns)],
            ignore_index=True)
    dataset.to_csv(DATASET_PATH, index=False)


def split_data():
    """Re-save each noise .npy file as chunks of at most LEN windows
    under TRAIN_PATH/noise_split, named ``<stem>_<chunk_index>.npy``.

    Fixes the original loop, which clamped the slice end with
    ``min(..., data.shape[0] - 1)`` and stopped once ``end`` reached
    ``shape[0] - 1`` -- silently dropping the final partial chunk and the
    last row of every file. Also creates the output directory up front.
    """
    noise_path = os.path.join(TRAIN_PATH, 'noise')
    out_dir = os.path.join(TRAIN_PATH, "noise_split")
    os.makedirs(out_dir, exist_ok=True)
    file_list = os.listdir(noise_path)
    LEN = 200  # max windows per output file
    with tqdm(total=len(file_list)) as process_bar:
        for file in file_list:
            process_bar.set_description(file)
            process_bar.update(1)

            src_path = os.path.join(noise_path, file)
            file_name = os.path.basename(src_path).split('.')[0]
            data = np.load(src_path)
            # Step through the first axis in LEN-sized slices; numpy
            # slicing clamps the end, so the last chunk may be shorter.
            for count, start in enumerate(range(0, data.shape[0], LEN)):
                npy = data[start:start + LEN, ...]
                save_path = os.path.join(out_dir, f'{file_name}_{count}.npy')
                np.save(save_path, npy)


if __name__ == "__main__":
    # main()
    noise2dataset()
    # split_data()
