from utils.constant import *
from utils.core import *

import json
from typing import Generator, Union 

import torch
from tqdm import tqdm


def get_wavfile_path(meta_path : str = "./data/meta.json", train : bool = True):
    """Return the list of wav-file paths recorded in the meta json.

    Parameters
    ----------
    meta_path : path of the meta json; must hold "train" and "test" lists.
    train : pick the "train" list when True, otherwise the "test" list.

    Returns
    -------
    list of file-path strings for the requested split.
    """
    split = "train" if train else "test"
    with open(meta_path, "r", encoding="utf-8") as meta_fp:
        meta = json.load(fp=meta_fp)
    return meta[split]


def dump_preprocess(dump_dir : str, train : bool = True, numc : int = 8, reduce_dim : int = -1, 
                  cluster_dict : dict = cluster_dict, loc : Union[int, list, tuple] = (0, 1, 2),
                  method : str = "average", meta_path : str = "./data/meta.json",
                  n_flag : str = "negative", p_flag : str = "positive"):
    """Precompute clustered-MFCC features for a split and dump them to disk.

    Writes two files under ``dump_dir``: ``features.npy`` (one flattened
    feature vector per wav file) and ``labels.npy`` (0 for ``p_flag`` paths,
    1 for ``n_flag`` paths).

    Bug fixed: the original never appended to ``y``, so ``labels.npy`` was
    always dumped as an empty array.  Labels are now derived from the path
    flags exactly as ``load_abnormal`` does.

    Parameters
    ----------
    dump_dir : output directory; created if missing.
    train : dump the "train" split when True, otherwise "test".
    numc : number of MFCC coefficients, forwarded to frame_mfcc_from_file.
    reduce_dim : target dimension for reduce_feature (-1 keeps its default behavior).
    cluster_dict : kwargs forwarded to cluster_mfcc_feature.
    loc : cluster indices to keep, forwarded to choose_point_base_on_label.
    method : kept for signature compatibility; not used here.
    meta_path : meta json holding the "train"/"test" file lists
        (was hardcoded to "./data/meta.json"; the default preserves that).
    n_flag / p_flag : path components marking negative / positive samples.

    Raises
    ------
    FileExistsError : when a file path carries neither flag.
    """
    if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)

    with open(meta_path, "r", encoding="utf-8") as fp:
        file_dict = json.load(fp=fp)
    files = file_dict["train" if train else "test"]
    X = []
    y = []
    for file in tqdm(files):
        # Label from the path: a component equal to p_flag -> 0, n_flag -> 1.
        # Normalize separators first so mixed "/" and "\\" paths split cleanly.
        parts = file.replace("\\", "/").split("/")
        if p_flag in parts:
            label = 0
        elif n_flag in parts:
            label = 1
        else:
            raise FileExistsError("""Input file path must carry the flag to represent the class! 
                But receive {} without {} or {}. Consider assign the p_flag and n_flag.""".format(file, p_flag, n_flag))

        mfcc = frame_mfcc_from_file(file, numc=numc)
        mfcc_feature = reduce_feature(mfcc, output_dim=reduce_dim)
        labels = cluster_mfcc_feature(mfcc_feature, **cluster_dict)
        x = choose_point_base_on_label(mfcc_feature, labels, loc=loc).reshape(-1)
        X.append(x)
        y.append(label)

    np.save(os.path.join(dump_dir, "features.npy"), arr=np.array(X))
    np.save(os.path.join(dump_dir, "labels.npy"), arr=np.array(y))

def load_abnormal(meta_path : str = "./data/meta.json", return_numpy : bool = True, train : bool = True, X_type : str = "mfcc_vector", 
                  numc : int = 8, reduce_dim : int = -1, cluster_dict : dict = cluster_dict, loc : Union[int, list, tuple] = (0, 1, 2),
                  method : str = "average", n_flag : str = "negative", p_flag : str = "positive"):
    """Load the dataset as (features, labels), inferring labels from paths.

    A path component equal to ``p_flag`` maps to label 0, one equal to
    ``n_flag`` maps to label 1.

    Bug fixed: the original chain of "//" / "/" / "\\\\" checks left
    ``split_file`` unbound (or stale from the previous iteration) for a path
    with no separator, and mis-split mixed-separator paths; separators are
    now normalized before a single split.

    Parameters
    ----------
    meta_path : meta json holding the "train"/"test" file lists.
    return_numpy : convert X and y to numpy arrays before returning.
    train : use the "train" list when True, otherwise "test".
    X_type : feature mode forwarded to process_wav_file.
    numc, reduce_dim, cluster_dict, loc, method : kept for signature
        compatibility; not forwarded here — process_wav_file uses its own
        defaults.  NOTE(review): confirm whether these should be passed on.
    n_flag / p_flag : path components marking negative / positive samples.

    Returns
    -------
    (X, y) : features and integer labels (0 = positive, 1 = negative).

    Raises
    ------
    FileExistsError : when a file path carries neither flag.
    """
    with open(meta_path, "r", encoding="utf-8") as fp:
        file_dict = json.load(fp=fp)
    files = file_dict["train" if train else "test"]

    X = []
    y = []
    for file in files:
        # determine the label (normalize separators, then split once)
        split_file = file.replace("\\", "/").split("/")
        if p_flag in split_file:
            label = 0
        elif n_flag in split_file:
            label = 1
        else:
            raise FileExistsError("""Input file path must carry the flag to represent the class! 
                But receive {} without {} or {}. Consider assign the p_flag and n_flag.""".format(file, p_flag, n_flag))

        x = process_wav_file(file, mode=X_type)
        X.append(x)
        y.append(label)

    if return_numpy:
        X = np.array(X)
        y = np.array(y)
    return (X, y)

def DataLoader(meta_path : str = "./data/meta.json", batch_size : int = 64, train : bool = True, shuffle : bool = True, 
              mode : str = "normal", n_flag : str = "negative", p_flag : str = "positive") -> Generator:
    """Yield (FloatTensor, LongTensor) batches of features and path-derived labels.

    Labels: a path component equal to ``p_flag`` -> 0, ``n_flag`` -> 1.
    The final partial batch (fewer than ``batch_size`` items) is also yielded.

    Bug fixed: the original "//" / "/" / "\\\\" separator chain left
    ``split_file`` unbound (or stale from the previous file) for a path with
    no separator; separators are now normalized before a single split.
    Also fixed the "model must be" typo in the validation message.

    Parameters
    ----------
    meta_path : meta json holding the "train"/"test" file lists.
    batch_size : number of samples per yielded batch.
    train : use the "train" list when True, otherwise "test".
    shuffle : shuffle the file order before batching.
    mode : feature mode forwarded to process_wav_file; one of
        "normal", "logmel", "mfcc_matrix", "mfcc_vector".
    n_flag / p_flag : path components marking negative / positive samples.

    Raises
    ------
    ValueError : for an unknown mode.
    FileExistsError : when a file path carries neither flag.
    """
    if mode not in ["normal", "logmel", "mfcc_matrix", "mfcc_vector"]:
        raise ValueError('mode must be within "normal", "logmel", "mfcc_vector", "mfcc_matrix"!receive {} instead.'.format(mode))
    bx, by = [], []
    with open(meta_path, "r", encoding="utf-8") as fp:
        file_dict = json.load(fp=fp)
    files = file_dict["train" if train else "test"]
    indices = np.arange(len(files))
    if shuffle:
        np.random.shuffle(indices)
    for index in indices:
        file = files[index]
        # determine the label (normalize separators, then split once)
        split_file = file.replace("\\", "/").split("/")
        if p_flag in split_file:
            label = 0
        elif n_flag in split_file:
            label = 1
        else:
            raise FileExistsError("""Input file path must carry the flag to represent the class! 
                But receive {} without {} or {}. Consider assign the p_flag and n_flag.""".format(file, p_flag, n_flag))
        by.append(label)
        feature = process_wav_file(file, mode=mode)
        bx.append(np.array(feature, dtype="float32").tolist())
        if len(bx) == batch_size:
            yield torch.FloatTensor(bx), torch.LongTensor(by)
            bx, by = [], []
    if len(bx) > 0:
        # flush the final partial batch
        yield torch.FloatTensor(bx), torch.LongTensor(by)

def PositiveLoader(meta_path : str = "./data/meta.json", batch_size : int = 64, train : bool = True, shuffle : bool = True, mode : str = "normal") -> Generator:
    """Yield unlabeled FloatTensor batches, each sample wrapped in a channel dim.

    Bug fixed: the original validation accepted mode "mfcc" but no branch
    produced a feature for it, so that mode crashed later with an
    UnboundLocalError on ``feature``; it now raises a clear error up front.
    Also fixed the "model must be" typo in the validation message.

    Parameters
    ----------
    meta_path : meta json holding the "train"/"test" file lists.
    batch_size : number of samples per yielded batch.
    train : use the "train" list when True, otherwise "test".
    shuffle : shuffle the file order before batching.
    mode : "normal" (first 50 signal frames, about 1.5 seconds) or
        "logmel" (first 100 log-mel frames).  "mfcc" is still accepted by
        the validation for signature compatibility but is not implemented.

    Raises
    ------
    ValueError : for an unknown mode.
    NotImplementedError : for mode "mfcc" (no feature-extraction path exists).
    """
    if mode not in ["normal", "logmel", "mfcc"]:
        raise ValueError('mode must be within "normal", "logmel", "mfcc"!receive {} instead.'.format(mode))
    bx = []
    with open(meta_path, "r", encoding="utf-8") as fp:
        file_dict = json.load(fp=fp)
    files = file_dict["train" if train else "test"]
    indices = np.arange(len(files))
    if shuffle:
        np.random.shuffle(indices)
    for index in indices:        
        if mode == "normal":
            sample_rate, signal = wav_from_file(files[index], framing=True)
            feature = signal[:50] # get 50 frames only, about 1.5 seconds
        elif mode == "logmel":
            feature = get_logmel_from_file(files[index])[..., :100]
        else:
            # mode == "mfcc": previously fell through and crashed with an
            # UnboundLocalError on `feature`; fail loudly instead.
            raise NotImplementedError('mode "mfcc" is not implemented in PositiveLoader.')

        x = np.array(feature, dtype="float32").tolist()
        bx.append([x])  # wrap in a list to add a leading channel dimension
        if len(bx) == batch_size:
            yield torch.FloatTensor(bx)
            bx = []
    if len(bx) > 0:
        # flush the final partial batch
        yield torch.FloatTensor(bx)

if __name__ == "__main__":
    # Smoke test: iterate the log-mel loader and report batch geometry.
    for batch_x, batch_y in DataLoader(mode="logmel"):
        print(batch_x.shape, batch_y.shape, batch_x.dtype, batch_y.dtype)