"""
this script is used to define the dataset of earthquake
the .csv file in data_path store the information including raw data path, cluster id and so on.
"""
import time
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
from ConvNetQuake_pytorch.constant import DATASET_PATH, TRAIN_PATH
import threading
from queue import Queue
from collections import Counter


class EarthquakeDataset(Dataset):
    """
    @Deprecated

    Dataset that keeps exactly one chunk (a data/label .npy pair) in memory
    and reloads the cache whenever a requested sample falls outside the
    currently loaded index interval.
    """

    def __init__(self, data_path):
        # CSV describing every chunk: data_path, label_path, start_index, end_index
        self.dataset = pd.read_csv(data_path)
        self.cache_idx = 0
        self.cache_data = np.load(self.dataset.data_path[self.cache_idx])
        self.cache_label = np.load(self.dataset.label_path[self.cache_idx])
        # inclusive global-index range covered by the cached chunk
        self.interval = [self.dataset.start_index[self.cache_idx], self.dataset.end_index[self.cache_idx]]

    def __len__(self):
        # NOTE(review): assumes the last chunk's end_index equals the total
        # sample count — confirm against the CSV convention.
        return self.dataset.at[self.dataset.shape[0] - 1, 'end_index']

    def __get_cache_index(self, index):  # name-mangled: reachable only through 'self'
        """Find the chunk whose interval contains `index` and load it into the cache."""
        t = time.time()
        hit = self.dataset[(self.dataset.start_index <= index) & (self.dataset.end_index >= index)]
        self.cache_idx = hit.index[0]
        self.cache_data = np.load(self.dataset.data_path[self.cache_idx])
        self.cache_label = np.load(self.dataset.label_path[self.cache_idx])
        self.interval = [self.dataset.at[self.cache_idx, 'start_index'], self.dataset.at[self.cache_idx, 'end_index']]
        tqdm.write(f"update cache spend {time.time() - t}s.")

    def __getitem__(self, index):
        low, high = self.interval
        if not (low <= index <= high):
            # cache miss: swap in the chunk that covers this index
            self.__get_cache_index(index)
            low = self.interval[0]
        offset = index - low
        sample = self.cache_data[offset]
        # raw labels start at -1; shift so that noise maps to 0
        shifted = self.cache_label[offset] + 1
        target = 1 if shifted > 0 else 0  # 0 means noise and 1 means events
        return sample, target

    def init_cache(self, producer_num):
        # no-op: kept only for interface parity with EarthquakeDatasetCache
        pass


class LoadCache(threading.Thread):
    """
    Cache producer thread.

    Pulls chunk indices from `cursor_queue`, loads the matching data/label
    .npy files, and pushes (capacity, data, labels) tuples onto the shared
    cache `queue` until the -1 sentinel is seen.
    """

    def __init__(self, producer_name, queue: Queue, dataset, cursor_queue):
        super().__init__()
        self.producer_name = producer_name
        self.cursor_queue = cursor_queue
        self.dataset = dataset
        self.queue = queue

    def run(self):
        tqdm.write(f"[Thread name is {self.name}] {self.producer_name} start working...")
        while True:
            cursor = self.cursor_queue.get()
            if cursor == -1:
                # put the sentinel back so sibling producers also stop
                self.cursor_queue.put(-1)
                tqdm.write(f"{self.producer_name} stop produce.")
                return
            chunk = np.load(self.dataset.data_path[cursor])
            labels = np.load(self.dataset.label_path[cursor])
            # first element is the chunk capacity (number of samples)
            self.queue.put((chunk.shape[0], chunk, labels))


class EarthquakeDatasetCache(Dataset):
    """
    Dataset backed by a queue of chunks pre-loaded by LoadCache producer
    threads.

    Samples must be read in increasing index order (use shuffle=False):
    once an index passes the end of the current chunk, the next chunk is
    popped from the cache queue.
    """

    def __init__(self, dataset_path, binary_classification):
        # CSV describing every chunk: data_path, label_path, start_index, end_index
        self.dataset = pd.read_csv(dataset_path)

        self.cache_data = np.array([])
        self.cache_label = np.array([])
        self.trash = 0      # samples consumed in already-discarded chunks
        self.inventory = 0  # sample count of the current chunk
        self.cursor_queue = None
        self.queue = None

        # when True, collapse the label set to binary noise(0)/event(1)
        self.simplify = binary_classification  # determine whether simplify the task to a binary classification

    def __len__(self):
        # NOTE(review): assumes the last chunk's end_index equals the total
        # sample count — confirm against the CSV convention.
        return self.dataset.at[self.dataset.shape[0] - 1, 'end_index']

    def __getitem__(self, index):
        if index >= self.trash + self.inventory:
            # current chunk exhausted: advance to the next one from the producers
            self.trash += self.inventory
            self.inventory, self.cache_data, self.cache_label = self.queue.get()
        # get the data and label from cache
        _data = self.cache_data[index - self.trash]
        # raw labels start at -1; shift so that noise maps to 0
        _label = self.cache_label[index - self.trash] + 1
        if self.simplify:  # simplify the task as binary classification
            _label = 1 if _label > 0 else 0  # 0 means noise and 1 means events
        return _data, _label

    def init_cache(self, producer_num):
        """Reset the cursors and start `producer_num` daemon LoadCache threads."""
        print("init cache")
        self.trash = 0
        self.inventory = 0
        self.cursor_queue = Queue(self.dataset.shape[0] + 1)
        self.queue = Queue(producer_num + 1)  # the capacity of cache queue

        for i in range(0, self.dataset.shape[0]):
            self.cursor_queue.put(i)
        self.cursor_queue.put(-1)  # -1 is stop flag

        for i in range(producer_num):
            thread = LoadCache(f'producer_{i}', self.queue, self.dataset, self.cursor_queue)
            # daemon=True so producers never block interpreter exit;
            # Thread.setDaemon() is deprecated since Python 3.10.
            thread.daemon = True
            thread.start()


if __name__ == "__main__":
    "for test"
    # earthquake_dataset = EarthquakeDatasetCache(os.path.join(TRAIN_PATH, 'test.csv'), True)
    earthquake_dataset = EarthquakeDataset(os.path.join(TRAIN_PATH, 'train.csv'))
    earthquake_data_loader = DataLoader(dataset=earthquake_dataset, batch_size=512, shuffle=False)
    length = len(earthquake_data_loader)

    print(f"the length of data loader is {length}")
    print(f"the length of data dataset is {len(earthquake_dataset)}")

    earthquake_dataset.init_cache(3)
    with tqdm(total=length) as bar:
        for idx, (data, label) in enumerate(earthquake_data_loader):
            # print(data.shape)
            # print(label)
            c = Counter(label.numpy())
            print(c)
            # time.sleep(0.001)
            bar.update(1)
