"""
this script is used to provide some function
"""
import os

import pandas as pd
import numpy as np
from tqdm import tqdm

import sys

sys.path.append('.//')

from ConvNetQuake_pytorch.constant import DATASET_PATH, TRAIN_PATH
from eq_dataset import EarthquakeDataset


def split_train_test():
    """
    Split the ordered dataset (a .csv file) into a train part and a test part.

    The first 70% of each category (0 = noise, 1 = event, 2 = augmentation)
    goes to the train split and the remainder to the test split. Both splits
    are saved as csv files holding the data information (paths, cluster id,
    ...); the raw waveform data itself stays in the npy files.
    """
    dataset = pd.read_csv(DATASET_PATH)
    # keep the original (ordered) row order inside every category
    parts = [dataset[dataset.category == c] for c in (0, 1, 2)]

    train_parts, test_parts = [], []
    for part in parts:
        cut = int(0.7 * part.shape[0])
        train_parts.append(part.iloc[:cut])
        test_parts.append(part.iloc[cut:])

    pd.concat(train_parts, ignore_index=True).to_csv(
        os.path.join(TRAIN_PATH, "train.csv"), index=False)
    pd.concat(test_parts, ignore_index=True).to_csv(
        os.path.join(TRAIN_PATH, "test.csv"), index=False)


def split_train_test_shuffle():
    """
    Randomly split the dataset into train and test parts (cf. split_train_test).

    To speed up the training step, waveform data and labels are both written
    out as (larger) npy batch files; the resulting csv only stores the paths
    of those npy files plus the global index range they cover.
    """
    dataset = pd.read_csv(DATASET_PATH)
    # stratified sample: 80% of every category goes to the train split
    train_dataset = dataset.groupby(by=['category']).sample(frac=0.8, replace=False, random_state=1)

    test_dataset = dataset.drop(index=train_dataset.index)
    test_events = test_dataset[test_dataset.category > 0]
    test_noise = test_dataset[test_dataset.category == 0]
    # each event file holds ~200 waveforms, so `count` approximates the number
    # of event files; keep roughly three noise files per event file
    count = test_events.amount.sum() // 200
    # clamp so sample() cannot raise ValueError when fewer noise rows exist
    # than requested (the old code crashed in that case)
    n_noise = min(3 * count, test_noise.shape[0])
    test_noise = test_noise.sample(n=n_noise, replace=False, random_state=1)
    test_dataset = pd.concat([test_events, test_noise])

    # shuffle the row order of both splits, then repack them into npy batches
    train_dataset = train_dataset.sample(frac=1.0, replace=False, random_state=1)
    test_dataset = test_dataset.sample(frac=1.0, replace=False, random_state=1)

    shuffle_and_restore(test_dataset.itertuples(), 'test', test_dataset.shape[0])
    shuffle_and_restore(train_dataset.itertuples(), 'train', train_dataset.shape[0])


def _save_batch(data_batch, label_batch, save_info, dataset_df):
    """Shuffle one accumulated batch in-memory, save it as npy data/label files
    under TRAIN_PATH/<name>/, and return `dataset_df` extended with a row
    recording the file paths and the covered global index range."""
    tqdm.write("save file ...")
    # shuffle inside the batch so waveforms from one source file are decorrelated
    shuffle_index = np.random.permutation(data_batch.shape[0])
    data_batch = data_batch[shuffle_index, ...]
    label_batch = label_batch[shuffle_index, ...]
    save_path_data = os.path.join(TRAIN_PATH, save_info['name'], 'data_{}.npy'.format(save_info['index']))
    save_path_label = os.path.join(TRAIN_PATH, save_info['name'], 'label_{}.npy'.format(save_info['index']))
    np.save(save_path_data, data_batch)
    np.save(save_path_label, label_batch)
    df = pd.DataFrame(
        data=[[save_path_data, save_path_label, save_info['start_index'], save_info['end_index'] - 1]],
        columns=dataset_df.columns)
    return pd.concat([dataset_df, df], ignore_index=True)


def shuffle_and_restore(it, name: str, dataset_amount):
    """
    Concatenate the waveform files referenced by `it` into large npy batches
    (~50k waveforms each), shuffle every batch and save data/labels under
    TRAIN_PATH/<name>/, recording path + global index range in <name>.csv.

    :param it: itertuples() iterator over dataset rows (needs the fields
               data_path, data_info_path, category and amount)
    :param name: split name, e.g. 'train' or 'test' (also the subfolder name)
    :param dataset_amount: total number of rows, used for the progress bar
    """
    dataset_df = pd.DataFrame(columns=['data_path', 'label_path', 'start_index', 'end_index'])
    process_bar = tqdm(total=dataset_amount)
    process_bar.set_description(name)

    it_head = next(it)
    data_batch = np.load(it_head.data_path)
    # noise rows (category 0) carry no cluster ids -> label every item -1
    label_batch = pd.read_csv(it_head.data_info_path).cluster_id.values if it_head.category > 0 else np.array(
        [-1] * it_head.amount)
    count = it_head.amount
    save_info = {'index': 0, 'name': name, 'start_index': 0, 'end_index': it_head.amount}
    process_bar.update(1)

    for row in it:
        process_bar.update(1)
        process_bar.set_description(f'{name}:{os.path.basename(row.data_path):>35}')
        data = np.load(row.data_path)
        if data.shape[0] == 0:
            continue
        if row.category > 0:
            label = pd.read_csv(row.data_info_path).cluster_id.values
        else:
            label = np.array([-1] * row.amount)

        if count > 50000:  # batch is full -> flush it before starting a new one
            dataset_df = _save_batch(data_batch, label_batch, save_info, dataset_df)
            count = row.amount
            save_info['start_index'] = save_info['end_index']
            save_info['end_index'] += row.amount
            save_info['index'] += 1
            data_batch = data
            label_batch = label
        else:
            count += row.amount
            save_info['end_index'] += row.amount
            data_batch = np.append(data_batch, data, axis=0)
            label_batch = np.append(label_batch, label)

    # always flush the trailing batch: the old `count <= 50000` guard silently
    # dropped the last batch whenever it had already grown past the threshold
    if data_batch.shape[0] > 0:
        dataset_df = _save_batch(data_batch, label_batch, save_info, dataset_df)
    df_save_path = os.path.join(TRAIN_PATH, f'{name}.csv')
    dataset_df.to_csv(df_save_path, index=False)
    process_bar.close()


def refresh_dataset():
    """
    Rewrite the 'data_path' (and, for events, 'data_info_path') columns of the
    dataset csv after the data files have been moved into the TRAIN_PATH
    subfolders ('noise', 'positive', 'augmentation').

    :raises ValueError: if a row has a category id other than 0, 1 or 2
    """
    # category id -> subfolder name under TRAIN_PATH
    folders = {0: 'noise', 1: 'positive', 2: 'augmentation'}
    dataset = pd.read_csv(DATASET_PATH).reset_index()
    for i in tqdm(range(dataset.shape[0])):
        cat = dataset.at[i, 'category']
        try:
            folder = folders[cat]
        except KeyError:
            # the old if/elif chain left `category` unbound for unknown ids,
            # which surfaced as a confusing NameError further down
            raise ValueError(f'unknown category {cat!r} at row {i}') from None

        file_name = os.path.basename(dataset.at[i, 'data_path'])
        dataset.at[i, 'data_path'] = os.path.join(TRAIN_PATH, folder, file_name)

        if cat != 0:
            # only event/augmentation rows carry an extra info csv
            info_name = os.path.basename(dataset.at[i, 'data_info_path'])
            dataset.at[i, 'data_info_path'] = os.path.join(TRAIN_PATH, folder, info_name)
    dataset.to_csv(DATASET_PATH, index=False)


def _refresh_split(split: str):
    """Point the data_path/label_path columns of TRAIN_PATH/<split>.csv at the
    files under TRAIN_PATH/<split>/ (keeps only the original file names)."""
    csv_path = os.path.join(TRAIN_PATH, f'{split}.csv')
    dataset = pd.read_csv(csv_path)
    for i in range(dataset.shape[0]):
        for col in ('data_path', 'label_path'):
            filename = os.path.basename(dataset.at[i, col])
            dataset.at[i, col] = os.path.join(TRAIN_PATH, split, filename)
    dataset.to_csv(csv_path, index=False)


def refresh_train_test():
    """
    Refresh the npy file paths stored in train.csv and test.csv after the
    batched data/label files were moved under TRAIN_PATH/train and /test.
    """
    # the two halves were copy-pasted duplicates; one helper handles both
    _refresh_split('train')
    _refresh_split('test')


def squeeze_dataset(category):
    """
    Re-pack ('squeeze') all waveforms of one category into files of exactly
    200 items each (the last file may hold fewer).

    The event data was extracted from two stations, so the original files may
    hold different amounts per item; this evens them out. The repacked rows
    replace the old rows of `category` in the dataset csv.

    :param category: category id whose rows are squeezed
    """
    dataset = pd.read_csv(DATASET_PATH)
    dataset_interest = dataset[dataset.category == category]
    dataset = dataset[dataset.category != category]

    # collect first, concatenate once: the old idx==0 bookkeeping raised a
    # NameError when the very first row had amount == 0 (data_batch unbound)
    data_parts, info_parts = [], []
    for row in dataset_interest.itertuples():
        if row.amount == 0:
            continue
        data_parts.append(np.load(row.data_path))
        info_parts.append(pd.read_csv(row.data_info_path))
    if not data_parts:
        # nothing to squeeze; still persist the removal of the old rows
        dataset.to_csv(DATASET_PATH, index=False)
        return
    data_batch = np.concatenate(data_parts, axis=0)
    data_info_batch = pd.concat(info_parts, ignore_index=True)

    # slice the big batch into chunks of 200 waveforms and register each chunk
    # NOTE(review): the output folder is fixed to 'positive_squeeze' and the
    # appended category is fixed to 1 regardless of `category` — confirm that
    # only category 1 is ever squeezed
    total = data_batch.shape[0]
    for start in range(0, total, 200):
        end = min(start + 200, total)
        data = data_batch[start:end, ...]
        label = data_info_batch.iloc[start:end]
        amount = end - start
        data_save_path = os.path.join(TRAIN_PATH, 'positive_squeeze', f'data_{start}.npy')
        label_save_path = os.path.join(TRAIN_PATH, 'positive_squeeze', f'label_{start}.csv')
        np.save(data_save_path, data)
        label.to_csv(label_save_path, index=False)
        df = pd.DataFrame(data=[[label_save_path, data_save_path, amount, 1]], columns=dataset.columns)
        dataset = pd.concat([dataset, df], ignore_index=True)
    dataset.to_csv(DATASET_PATH, index=False)


def main():
    """Entry point: run the randomized train/test split."""
    # the alternatives below stay for convenience when re-building the dataset
    split_train_test_shuffle()
    # squeeze_dataset(1)
    # refresh_train_test()


if __name__ == "__main__":
    main()
