import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

import os
import pickle

from NLFE.nlfe import approximate_entropy, sample_entropy, fuzzy_entropy

from features import FormattedData


class FeatureExtraction:
    """Extract windowed entropy features from labelled CSV recordings.

    Walks ``raw_data_path``/<sub_folder> directories, reads each CSV file,
    keeps the first ``data_len`` rows of the columns in ``interest_cols``,
    and computes one entropy value per (window, column). Each file yields
    one flattened feature vector appended to ``samples``, with its class
    label appended to ``sample_labels``.
    """

    def __init__(self):
        self.raw_data_path = "../raw_data"
        # One sub-folder per class; order must line up with `labels`.
        self.sub_folder_list = ["DT", "IEE", "NORMAL"]
        self.window_size = 40  # samples per analysis window
        self.stride = 40       # hop between window starts (== window_size: no overlap)
        self.data_len = 80     # rows kept from the head of each file
        self.interest_cols = [0, 1, 3]  # CSV column indices used as signals
        self.labels = [1, 2, 3]         # class label per sub-folder
        self.sample_labels = []
        self.samples = []

    def _next_file_data(self):
        """Yield the trimmed (data_len, len(interest_cols)) array per file.

        Side effect: appends the file's class label to ``sample_labels``.
        """
        for label, folder in zip(self.labels, self.sub_folder_list):
            sub_folder = os.path.join(self.raw_data_path, folder)
            for filename in os.listdir(sub_folder):
                self.sample_labels.append(label)
                file_data_df = pd.read_csv(os.path.join(sub_folder, filename))
                yield file_data_df.values[:self.data_len, self.interest_cols]

    def extract_entropy(self, entropy_func, n=0):
        """Compute per-window, per-column entropy features for every file.

        entropy_func: called as ``f(series, m, r)`` or, when ``n`` is truthy,
                      ``f(series, m, n, r)`` (e.g. fuzzy entropy's gradient n).
        n:            extra integer parameter forwarded to ``entropy_func``;
                      0 (default) selects the 3-argument call form.
        """
        # Number of complete windows of `window_size` advanced by `stride`.
        # BUGFIX: the original used (data_len - stride) / window_size + 1,
        # swapping stride and window_size — coincidentally identical here
        # because both are 40.
        window_nums = (self.data_len - self.window_size) // self.stride + 1
        m = 3  # embedding dimension for the entropy estimators
        for needed_data_np in self._next_file_data():
            file_fea = np.zeros((window_nums, len(self.interest_cols)))
            for i in range(window_nums):
                s = i * self.stride
                window = needed_data_np[s:s + self.window_size, :]
                # Tolerance r is the per-column std of this window; compute it
                # once per window instead of once per (window, column) pair.
                r = np.std(window, axis=0)
                for j in range(len(self.interest_cols)):
                    if not n:
                        file_fea[i, j] = entropy_func(window[:, j], m, r[j])
                    else:
                        file_fea[i, j] = entropy_func(window[:, j], m, n, r[j])
            # Column-major flatten: all windows of col 0, then col 1, ...
            self.samples.append(np.transpose(file_fea).flatten())

    def reset(self):
        """Clear accumulated samples/labels before extracting a new feature set."""
        self.sample_labels = []
        self.samples = []

    def save_features(self, output_path, output_name):
        """Pickle the accumulated features as a FormattedData object."""
        desc = {"sample_nums": len(self.samples)}
        formatted_data = FormattedData(
            np.array(self.samples), np.array(self.sample_labels), desc
        )
        with open(os.path.join(output_path, output_name), "wb") as f:
            pickle.dump(formatted_data, f)


def fea_extraction():
    """Legacy script-style equivalent of FeatureExtraction.extract_entropy.

    Reads every CSV under ../raw_data/{DT,IEE,NORMAL}, computes per-window,
    per-column approximate entropy, and returns:
        samples: ndarray of shape (n_files, window_nums * len(interest_cols))
        labels:  ndarray of the class label (1/2/3) for each file
    """
    raw_data_path = "../raw_data"
    sub_folder_list = ["DT", "IEE", "NORMAL"]
    window_size = 40  # samples per analysis window
    stride = 40       # hop between window starts
    data_len = 80     # rows kept from the head of each file
    interest_cols = [0, 1, 3]
    labels = [1, 2, 3]
    sample_label = []
    samples = []
    # Number of complete windows. BUGFIX: the original swapped stride and
    # window_size in this formula; it only worked because both are 40.
    window_nums = (data_len - window_size) // stride + 1
    m = 3  # embedding dimension for approximate entropy
    for label, d in zip(labels, sub_folder_list):
        sub_folder = os.path.join(raw_data_path, d)
        for filename in os.listdir(sub_folder):
            sample_label.append(label)
            file_data_np = pd.read_csv(os.path.join(sub_folder, filename)).values
            needed_data_np = file_data_np[:data_len, interest_cols]
            file_fea = np.zeros((window_nums, len(interest_cols)))
            for i in range(window_nums):
                s = i * stride
                ts = needed_data_np[s:s + window_size, :]
                # Per-column tolerance for this window; computed once per
                # window rather than once per (window, column) pair.
                r = np.std(ts, axis=0)
                for j in range(len(interest_cols)):
                    file_fea[i, j] = approximate_entropy(ts[:, j], m, r[j])
            # Column-major flatten: all windows of col 0, then col 1, ...
            samples.append(np.transpose(file_fea).flatten())
    return np.array(samples), np.array(sample_label)


def save_fea():
    """Run fea_extraction() and pickle samples/labels under ../features/."""
    samples, sample_labels = fea_extraction()
    print(samples.shape, sample_labels.shape)
    # Use context managers so the pickle files are flushed and closed
    # deterministically (the original leaked both open file handles).
    with open("../features/samples.pkl", "wb") as f:
        pickle.dump(samples, f)
    with open("../features/labels.pkl", "wb") as f:
        pickle.dump(sample_labels, f)


if __name__ == '__main__':
    fe = FeatureExtraction()
    # (entropy function, fuzzy-gradient n, output pickle name); n=0 selects
    # the 3-argument call form inside extract_entropy.
    configs = [
        (approximate_entropy, 0, "ApEn_fea.pkl"),
        (sample_entropy, 0, "SamEn_fea.pkl"),
        (fuzzy_entropy, 2, "FuEn_fea.pkl"),
    ]
    for entropy_func, n, output_name in configs:
        fe.reset()  # no-op on the first pass; clears state between feature sets
        fe.extract_entropy(entropy_func, n=n)
        fe.save_features(output_path="../features", output_name=output_name)
        print("------------------------------")
        print(fe.samples[-5:])
        print(fe.sample_labels[-5:])