from typing import Iterable, List, Tuple

from scipy.signal import waveforms

from utils.constant import *
import matplotlib
from python_speech_features import *
import numpy as np

from sklearn.decomposition import PCA
from sklearn.cluster import KMeans, DBSCAN
from skimage.metrics import peak_signal_noise_ratio
from collections import Counter
import json
import os
import random
import librosa
from prettytable import PrettyTable

def wav_from_file(wav_file : str, framing : bool = False, window : str = "hamming"):
    """Load a wav file and optionally split it into windowed 32 ms frames.

    Args:
        wav_file: path to the wav file.
        framing: when False, return the raw signal; when True, return a
            [frame_num, sample_length] matrix of windowed frames.
        window: "hamming" for a Hamming window, "hanning" for a Hann window.
            ("haining" is accepted as a backward-compatible misspelling of
            "hanning".)

    Returns:
        (sample_rate, signal) when framing is False, otherwise
        (sample_rate, frames).

    Raises:
        ValueError: if `window` is not a recognised window name.
    """
    if window not in ("hamming", "hanning", "haining"):
        raise ValueError("window must be hamming or hanning, instead of {}!".format(window))

    # librosa.load keeps the file's native sample rate (sr=None).
    signal, sample_rate = librosa.load(wav_file, sr=None)
    if not framing:
        return sample_rate, signal

    # Frame length in milliseconds, converted to a sample count.
    frame_length = 32
    sample_length = int(sample_rate / 1000 * frame_length)
    frame_num = signal.shape[0] // sample_length

    # Vectorised window coefficients (same values as the original
    # per-sample list comprehensions).
    n = np.arange(sample_length)
    if window == "hamming":
        w = 0.54 - 0.46 * np.cos(2 * np.pi * n / (sample_length - 1))
    else:  # "hanning" / legacy "haining"
        w = 0.5 * (1 - np.cos(2 * np.pi * n / (sample_length - 1)))

    # Drop the trailing partial frame and apply the window to every frame.
    frames = signal[: frame_num * sample_length].reshape(frame_num, sample_length)
    frame_out = frames * w
    return sample_rate, frame_out

def get_mfcc_from_array(signal : np.ndarray, sample_rate, numc : int = 13) -> np.ndarray:
    """Extract a [numc, t] MFCC matrix from a raw signal via librosa."""
    return librosa.feature.mfcc(y=signal, sr=sample_rate, n_mfcc=numc)


def get_mfcc_from_file(wav_file : str, numc : int = 13) -> np.ndarray:
    """Load a wav file and return its MFCC matrix.

    Args:
        wav_file: path to the wav file.
        numc: number of MFCC coefficients.

    Returns:
        The MFCC matrix produced by `get_mfcc_from_array`.
    """
    sample_rate, signal = wav_from_file(wav_file)

    # Down-mix a stereo (num_samples, 2) signal to mono. The original check
    # `len(signal) == 2 and signal[1] == 2` compared sample VALUES instead of
    # the array shape, so it effectively never fired.
    if signal.ndim == 2 and signal.shape[1] == 2:
        signal = (signal[:, 1] + signal[:, 0]) / 2

    return get_mfcc_from_array(
        signal=signal,
        sample_rate=sample_rate,
        numc=numc
    )

def get_logmel_from_file(wav_file : str, n_fft : int = 1024, hop_length : int = 512, n_mels : int = 128) -> np.ndarray:
    """Load a wav file and return its log-mel spectrogram.

    Args:
        wav_file: path to the wav file.
        n_fft: FFT window size.
        hop_length: hop between successive frames, in samples.
        n_mels: number of mel bands.

    Returns:
        logmelspec with shape [n_mels, len(signal) // hop_length + 1].
    """
    sample_rate, signal = wav_from_file(wav_file)
    # librosa >= 0.10 requires keyword arguments here; the old positional
    # call (signal, sample_rate) raises a TypeError.
    melspec = librosa.feature.melspectrogram(y=signal, sr=sample_rate, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels)
    logmelspec = librosa.power_to_db(melspec)
    return logmelspec

def get_fft_from_array(signal : np.ndarray, sample_rate : int) -> Tuple[np.ndarray, np.ndarray]:
    """Compute the FFT spectrum and frequency bins of a signal.

    Args:
        signal: either a mono 1-D array or a stereo (num_samples, 2) array;
            a stereo signal is averaged down to mono first. (The original
            always indexed `signal[:, 1]`, crashing on the 1-D signals that
            `get_fft_from_file` actually passes in.)
        sample_rate: sampling rate in Hz.

    Returns:
        (spectrum, freqs): complex FFT coefficients and the matching
        frequency of each bin.
    """
    if signal.ndim == 2:
        signal = (signal[:, 1] + signal[:, 0]) / 2
    # Scale down as if the samples were 16-bit integers.
    signal = signal / (2 ** 15)

    spectrum = np.fft.fft(signal)
    # Bin spacing is the sample period; equivalent to the original
    # `times[1] - times[0]` but safe for signals shorter than 2 samples.
    freqs = np.fft.fftfreq(signal.size, 1.0 / sample_rate)
    return spectrum, freqs


def get_fft_from_file(wav_file : str) -> Tuple[np.ndarray, np.ndarray]:
    """Load a wav file and return its FFT spectrum and frequency bins."""
    rate, samples = wav_from_file(wav_file)
    return get_fft_from_array(signal=samples, sample_rate=rate)

def filter_wav_from_spectrum(spectrum : np.ndarray, freqs : np.ndarray, gain_factor : float, gain_range : tuple):
    """Attenuate the whole spectrum, boost one frequency band, and rebuild the signal.

    The spectrum is scaled down by 0.01 everywhere, then the bins covering
    `gain_range` (in Hz) are boosted by `90 * gain_factor`. The filtered
    signal is recovered with an inverse FFT.

    Returns:
        (filtered_spectrum, filtered_signal)
    """
    attenuated = 0.01 * spectrum.copy()
    bin_width = freqs[1] - freqs[0]
    lo = int(gain_range[0] / bin_width)
    hi = int(gain_range[1] / bin_width)
    attenuated[lo:hi] = attenuated[lo:hi] * (90 * gain_factor)
    reconstructed = np.fft.ifft(attenuated).real
    return attenuated, reconstructed

def filter_wav_from_file(wav_file : str, gain_factor : float, gain_range : tuple):
    """Load a wav file and band-boost its spectrum via `filter_wav_from_spectrum`."""
    spectrum, freqs = get_fft_from_file(wav_file)
    return filter_wav_from_spectrum(spectrum, freqs, gain_factor=gain_factor, gain_range=gain_range)

def mfcc_psnr_from_spectrum(mfcc1 : np.ndarray, mfcc2 : np.ndarray) -> List[float]:
    """Compare two MFCC matrices with PSNR after min-max normalisation.

    Both matrices are trimmed to a common length along axis 0 and scaled
    to [0, 1], then split at column 13 and compared piecewise.

    NOTE(review): trimming happens on axis 0 while the split at 13 happens
    on axis 1, which assumes a (time, coefficient) layout — librosa's
    `mfcc` returns (n_mfcc, t), so confirm the callers transpose. TODO confirm.

    Returns:
        [psnr_first_13_columns, psnr_remaining_columns]
        (the original annotated `-> float` but has always returned a list).
    """
    min_tp = min(mfcc1.shape[0], mfcc2.shape[0])
    mfcc1 = mfcc1[:min_tp]
    mfcc2 = mfcc2[:min_tp]

    # Min-max normalise each matrix independently to [0, 1].
    mfcc1 = mfcc1 - mfcc1.min()
    mfcc2 = mfcc2 - mfcc2.min()
    mfcc1 = mfcc1 / mfcc1.max()
    mfcc2 = mfcc2 / mfcc2.max()

    i_mfcc1 = mfcc1[:, :13]
    c_mfcc1 = mfcc1[:, 13:]
    i_mfcc2 = mfcc2[:, :13]
    c_mfcc2 = mfcc2[:, 13:]

    return [
        peak_signal_noise_ratio(i_mfcc1, i_mfcc2),
        peak_signal_noise_ratio(c_mfcc1, c_mfcc2)
    ]

def mfcc_psnr_from_file(wav1_file : str, wav2_file : str) -> List[float]:
    """Compute the two PSNR scores between the MFCCs of two wav files."""
    return mfcc_psnr_from_spectrum(
        get_mfcc_from_file(wav1_file),
        get_mfcc_from_file(wav2_file),
    )


def reduce_feature(feature : np.ndarray, output_dim : int) -> np.ndarray:
    """Project feature rows down to `output_dim` dimensions with PCA."""
    pca = PCA(n_components=output_dim)
    reduced = pca.fit_transform(feature)
    return reduced

def frame_mfcc_from_file(wav_file : str, normalise : bool = True, numc : int = 13) -> np.ndarray:
    """Frame a wav file and return one flattened MFCC row per frame.

    When `normalise` is True the whole matrix is min-max scaled to [0, 1].
    """
    sample_rate, frames = wav_from_file(wav_file, framing=True)
    features = np.array([
        get_mfcc_from_array(frame, sample_rate, numc=numc).reshape(-1)
        for frame in frames
    ])
    if normalise:
        lo, hi = features.min(), features.max()
        features = (features - lo) / (hi - lo)
    return features

def cluster_mfcc_feature(mfcc_feature : np.ndarray, cluster_name : str, n_clusters : int = 3, eps : float = 0.05, min_samples : int = 5):
    """Cluster MFCC feature rows with KMeans or DBSCAN.

    Args:
        mfcc_feature: feature matrix shaped [N, k].
        cluster_name: "KMeans" or "DBSCAN".
        n_clusters: KMeans cluster count (was previously ignored — the
            original hard-coded 3 regardless of this argument).
        eps: DBSCAN neighbourhood radius (was previously ignored — the
            original hard-coded 0.05).
        min_samples: DBSCAN core-point threshold.

    Returns:
        The per-row label array from the chosen estimator.

    Raises:
        ValueError: if `cluster_name` is not one of the supported names.
    """
    if cluster_name not in ["KMeans", "DBSCAN"]:
        raise ValueError("cluster_name must be KMeans or DBSCAN, but receive {} instead".format(cluster_name))
    if cluster_name == "KMeans":
        labels = KMeans(n_clusters=n_clusters).fit_predict(mfcc_feature)
    if cluster_name == "DBSCAN":
        labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(mfcc_feature)
    return labels

def choose_point_base_on_label(points : np.ndarray, label : np.ndarray, loc : int = 0) -> np.ndarray:
    """Select the points whose label ranks `loc`-th by frequency.

    Labels are ranked most-frequent first. `loc` may be a single rank (int)
    or a list/tuple of ranks; out-of-range ranks in a list are skipped.

    Raises:
        TypeError: if `loc` is neither an int nor a list/tuple.
    """
    ranked = [key for key, _ in Counter(label).most_common()]

    if isinstance(loc, int):
        return points[label == ranked[loc]]
    if isinstance(loc, (list, tuple)):
        chosen = [points[label == ranked[r]] for r in loc if 0 <= r < len(ranked)]
        return np.vstack(chosen)
    raise TypeError("loc must be int or list or tuple! receive {} instead!".format(type(loc)))

def fuse_points(points : np.ndarray, method : str) -> np.ndarray:
    """Fuse a set of points into a single point, by PCA or by averaging.

    Returns a [1, k] array; raises ValueError for an unknown method.
    """
    if method == "average":
        return points.mean(axis=0)[None]
    if method == "PCA":
        fused = reduce_feature(points.T, output_dim=1)
        return fused.T
    raise ValueError("must be in PCA or average!")

def _collect_fused_points(wav_files : Iterable, output_dim : int, fuse_method : str, loc : Iterable, bucket : list):
    """Extract, reduce, cluster, select, and fuse points for each wav file,
    appending each fused coordinate into the per-dimension lists of `bucket`."""
    for wav_file in wav_files:
        mfcc_feature = frame_mfcc_from_file(wav_file)
        mfcc_feature = reduce_feature(mfcc_feature, output_dim=output_dim)
        # NOTE(review): `cluster_dict` is not defined in this module —
        # presumably exported by `from utils.constant import *`; verify.
        labels = cluster_mfcc_feature(mfcc_feature, **cluster_dict)
        mfcc_feature = choose_point_base_on_label(mfcc_feature, labels, loc=loc)
        points = fuse_points(mfcc_feature, method=fuse_method)
        for point in points:
            for i, p in enumerate(point):
                bucket[i].append(p)

def fuse_pipeline(positive_wav_files : Iterable, negative_wav_files : Iterable, output_dim : int, fuse_method : str, loc : Iterable):
    """Run the MFCC -> PCA -> cluster -> select -> fuse pipeline on both
    positive and negative wav file lists.

    Args:
        positive_wav_files / negative_wav_files: iterables of wav paths
            (the original annotated these as `str`, but they are iterated).
        output_dim: PCA output dimensionality.
        fuse_method: passed to `fuse_points` ("PCA" or "average"); the
            original accepted this parameter but hard-coded "average".
        loc: label rank(s) forwarded to `choose_point_base_on_label`.

    Returns:
        (p_points, n_points): per-dimension coordinate lists for the
        positive and negative sets.
    """
    p_points = [[] for _ in range(output_dim)]
    n_points = [[] for _ in range(output_dim)]
    _collect_fused_points(positive_wav_files, output_dim, fuse_method, loc, p_points)
    _collect_fused_points(negative_wav_files, output_dim, fuse_method, loc, n_points)
    return p_points, n_points

def get_meta_data(meta_data_file : str, train_ratio : float = 0.8, n_path : str = "./data/negative", p_path : str = "./data/positive"):
    """Build a shuffled train/test split of the wav corpus and dump it as JSON.

    Files under `p_path` (positive) and `n_path` (negative) are shuffled,
    split per-class by `train_ratio`, merged, re-shuffled, written to
    `meta_data_file`, and a summary table is printed.
    """
    def _abs_listing(root):
        return [os.path.abspath(os.path.join(root, name)) for name in os.listdir(root)]

    negative_files = _abs_listing(n_path)
    positive_files = _abs_listing(p_path)
    random.shuffle(negative_files)
    random.shuffle(positive_files)

    # Per-class split point.
    trn = int(len(negative_files) * train_ratio)
    trp = int(len(positive_files) * train_ratio)

    train_files = positive_files[:trp] + negative_files[:trn]
    test_files = positive_files[trp:] + negative_files[trn:]
    random.shuffle(train_files)
    random.shuffle(test_files)

    tep = len(positive_files) - trp
    ten = len(negative_files) - trn

    with open(meta_data_file, "w", encoding="utf-8") as fp:
        json.dump(obj={"train" : train_files, "test" : test_files}, fp=fp, indent=4, ensure_ascii=False)
    print("meta data has been dumped at \033[32m{}\033[0m".format(meta_data_file))

    table = PrettyTable(field_names=["", "size", "positive size", "negative size"])
    table.add_row(["train", trp + trn, trp, trn])
    table.add_row(["test", tep + ten, tep, ten])
    print(table)

def check_path(path : str, check_path : bool = True, check_file : bool = False, suffix : str = None) -> bool:
    """Validate a filesystem path, raising when a requested check fails.

    NOTE(review): the `check_path` parameter shadows the function's own
    name; kept as-is so existing keyword callers are not broken.

    Args:
        path: path to validate.
        check_path: when True, require that the path exists.
        check_file: when True, require that the path is a regular file.
        suffix: when given, require that the path ends with this suffix.

    Returns:
        True when every requested check passes.

    Raises:
        FileNotFoundError: the path does not exist.
        FileExistsError: the path is not a regular file.
        TypeError: the path does not end with `suffix`.
    """
    if check_path:
        if not os.path.exists(path):
            raise FileNotFoundError("{} doesn't exist, check the path again!".format(path))
    if check_file:
        if not os.path.isfile(path):
            raise FileExistsError("{} isn't a path of a wav file!".format(path))
    if suffix is not None:
        if not path.endswith(suffix):
            raise TypeError("Check the file! The model must be ended with {}!".format(suffix))
    return True

def process_wav_file(file_wav : str, mode : str = "mfcc_vector", trim : int = 150, sample_num : int = 10000) -> np.ndarray:
    """Turn a wav file into a model-ready feature array.

    Args:
        file_wav: path to the wav file (must exist and be a file).
        mode: "normal" (raw samples), "mfcc_vector" (flattened trimmed MFCC),
            "mfcc_matrix" (trimmed MFCC matrix), or "logmel" (normalised
            log-mel spectrogram with a leading channel axis).
        trim: number of MFCC time steps kept in the mfcc modes.
        sample_num: number of raw samples kept in "normal" mode.

    Returns:
        The processed feature array for the chosen mode.

    Raises:
        ValueError: for an unknown mode, or an input shorter than `trim`
            MFCC frames (the original raised ValueError("") with no message).
    """
    check_path(file_wav, check_file=True)

    if mode not in ["normal", "mfcc_vector", "mfcc_matrix", "logmel"]:
        raise ValueError("mode must be normal, mfcc_vector, mfcc_matrix or logmel, instead of {}!".format(mode))

    def _trimmed_normalised_mfcc() -> np.ndarray:
        """Shared mfcc_vector/mfcc_matrix path: trim then min-max normalise."""
        mfcc = get_mfcc_from_file(file_wav, numc=8)
        if mfcc.shape[1] <= trim:
            raise ValueError("The input wav file lasts too short! Please enter a wav longer than 2 seconds!")
        mfcc = mfcc[..., :trim]
        # Min-max normalisation; the original used `-` between the two
        # parenthesised terms where `/` was clearly intended (the logmel
        # branch below uses the correct form).
        return (mfcc - mfcc.min()) / (mfcc.max() - mfcc.min())

    if mode == "normal":
        sample_rate, signal = wav_from_file(file_wav)
        # Down-mix a stereo (num_samples, 2) signal; the original check
        # `len(signal) == 2 and signal[1] == 2` compared sample values,
        # not the array shape.
        if signal.ndim == 2 and signal.shape[1] == 2:
            signal = (signal[:, 1] + signal[:, 0]) / 2
        return signal[:sample_num]

    elif mode == "mfcc_vector":
        return _trimmed_normalised_mfcc().reshape(-1)

    elif mode == "mfcc_matrix":
        return _trimmed_normalised_mfcc()

    elif mode == "logmel":
        logmel = get_logmel_from_file(file_wav)[..., :100]
        logmel = (logmel - logmel.min()) / (logmel.max() - logmel.min())
        return logmel[None]