"""
this script is used to create positive samples which is with earthquake
Note: we save the data as numpy, while the author save the data as tfrecords.
"""
import json
import numpy as np
import os
import fnmatch
import pandas as pd
from obspy.core import read
from obspy.core.utcdatetime import UTCDateTime
from openquake.hazardlib.geo.geodetic import distance
from tqdm import tqdm
from constant import SAMPLE_RATE, STREAM_PATH, CATALOG_CLUSTER_PATH, WINDOW_SIZE, \
    OK027_POS, OK029_POS, TRAIN_PATH, FILTER_LAT, FILTER_LON, DATASET_PATH

# Map station ID -> GPS-coordinate dict (expected keys 'lat' and 'lon');
# looked up by distance_to_station() to locate each recording station.
stations_dict = {'OK027': OK027_POS,
                 'OK029': OK029_POS}


def filter_catalog(cat):
    """Keep only catalog rows inside the Guthrie-sequence bounding box.

    Args:
        cat: DataFrame with ``latitude`` and ``longitude`` columns.

    Returns:
        The subset of rows whose coordinates fall strictly inside the
        FILTER_LAT / FILTER_LON bounds.
    """
    in_lat = (cat.latitude > FILTER_LAT[0]) & (cat.latitude < FILTER_LAT[1])
    in_lon = (cat.longitude > FILTER_LON[0]) & (cat.longitude < FILTER_LON[1])
    return cat[in_lat & in_lon]


def preprocess_stream(stream):
    """Remove each trace's constant offset, then normalize amplitudes.

    Returns the detrended-and-normalized stream (obspy operates in place
    and returns the stream, so chaining is equivalent to the two-step form).
    """
    return stream.detrend('constant').normalize()


def distance_to_station(station_id, lat, long, depth):
    """Return the geodetic distance from an event to the given station.

    Args:
        station_id: key into stations_dict identifying the receiver.
        lat, long, depth: event hypocenter coordinates.

    Returns:
        Distance between event and station as computed by openquake's
        ``distance`` helper.
    """
    station = stations_dict[station_id]
    # Fixed station depth of -0.333 — presumably the sensor elevation in km
    # above sea level; TODO confirm against the station metadata.
    return distance(long, lat, depth, station['lon'], station['lat'], -0.333)


def get_travel_time(catalog, station_id):
    """Estimate the travel time from each event's origin to the station.

    Assumes straight-line propagation at a constant mean velocity, so each
    travel time is simply distance / v_mean.

    Args:
        catalog: DataFrame with ``latitude``, ``longitude`` and ``depth``
            columns (one row per event).
        station_id: key into stations_dict identifying the receiver.

    Returns:
        list: one travel time per catalog row, in catalog order.
    """
    v_mean = 5.0  # mean propagation velocity; value taken from the original author's code
    # The original built an intermediate coordinate list, then a distance list,
    # then a time list; one comprehension over zip() does the same in a single pass.
    return [distance_to_station(station_id, lat, lon, depth) / v_mean
            for lat, lon, depth in zip(catalog.latitude,
                                       catalog.longitude,
                                       catalog.depth)]


def main():
    """Cut earthquake windows from every mseed stream and persist them.

    For each stream file, catalog events inside the stream's time span are
    shifted by the estimated travel time and sliced into WINDOW_SIZE-second
    windows.  Accepted windows are stored as one float32 .npy array per
    stream plus a .csv of per-window metadata; a global dataset index CSV
    is updated after each stream file (category=1 marks earthquake data).
    """
    stream_files = [file for file in os.listdir(STREAM_PATH)
                    if fnmatch.fnmatch(file, '*.mseed')]
    cat = filter_catalog(pd.read_csv(CATALOG_CLUSTER_PATH))

    dataset_columns = ['data_info_path', 'data_path', 'amount', 'category']
    if os.path.exists(DATASET_PATH):
        dataset = pd.read_csv(DATASET_PATH)
        dataset_columns = list(dataset.columns)
    else:
        # BUG FIX: dataset_columns was previously bound only in the
        # file-exists branch, so the very first run crashed with NameError
        # when building dataset_df below.
        dataset = pd.DataFrame(columns=dataset_columns)

    # Expected number of samples per window; replaces the hard-coded 1000
    # so the slice stays consistent with the completeness check below.
    n_samples = int(WINDOW_SIZE * SAMPLE_RATE)

    for stream_file in tqdm(stream_files):
        tqdm.write("Now the file is {}".format(stream_file))
        stream = preprocess_stream(read(os.path.join(STREAM_PATH, stream_file)))
        station_id = stream[0].stats.station

        start_date = stream[0].stats.starttime
        end_date = stream[-1].stats.endtime
        filtered_catalog = cat[(cat.utc_timestamp >= start_date)
                               & (cat.utc_timestamp < end_date)]
        tqdm.write("the length of filetered_catalog is {}".format(filtered_catalog.shape[0]))

        df_columns = ['data_idx', 'cluster_id', 'channel', 'station']
        rows = []       # one metadata row per accepted window
        data_npy = []   # one (channels, n_samples) float32 array per window
        travel_time = get_travel_time(filtered_catalog, station_id)
        for idx, row in enumerate(filtered_catalog.itertuples()):
            # Shift origin time by the travel time so the window starts
            # when the wave reaches the station, not when it originates.
            event_time = row.utc_timestamp + travel_time[idx]
            st_event = stream.slice(UTCDateTime(event_time),
                                    UTCDateTime(event_time) + WINDOW_SIZE).copy()

            if len(st_event) < 3:  # we need NN1,NN2,NNE channels data
                continue
            if len(st_event[0]) < n_samples:  # skip data which is not completed
                continue

            # BUG FIX: obspy traces expose their sample array as `.data`;
            # the original `.dataset` attribute does not exist and raised
            # AttributeError.
            data = np.array([tr.data[:n_samples] for tr in st_event],
                            dtype=np.float32)
            # Encode channel/station lists as JSON strings for the CSV.
            channels = json.dumps([tr.stats.channel for tr in st_event])
            station_ids = json.dumps([tr.stats.station for tr in st_event])

            rows.append([len(data_npy), row.cluster_id, channels, station_ids])
            data_npy.append(data)

        # Build the metadata frame in one shot instead of per-row pd.concat
        # (which is quadratic in the number of windows).
        data_df = pd.DataFrame(data=rows, columns=df_columns)

        base_name = stream_file.split('.mseed')[0]
        # BUG FIX: use os.path.join for the 'positive' subdirectory instead
        # of a hard-coded '\\' separator, which produced broken filenames on
        # non-Windows systems.
        df_save_path = os.path.join(TRAIN_PATH, 'positive', '{}.csv'.format(base_name))
        data_df.to_csv(df_save_path, index=False)

        npy_save_path = os.path.join(TRAIN_PATH, 'positive', '{}.npy'.format(base_name))
        data_npy = np.array(data_npy, dtype=np.float32)
        np.save(npy_save_path, data_npy)

        dataset_df = pd.DataFrame(data=[[df_save_path, npy_save_path, data_npy.shape[0], 1]],
                                  columns=dataset_columns)  # category=1 means earthquake wave data
        dataset = pd.concat([dataset, dataset_df], ignore_index=True)
        # Rewrite the index after every file so progress survives interruption.
        dataset.to_csv(DATASET_PATH, index=False)

    print("over.")


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
