import os
import numpy as np
import pandas as pd
import librosa as lb
from tqdm import tqdm
from data_loader.utils.io import read_audio
from torchaudio.transforms import Spectrogram
from essentia.standard import PitchSalience, SpectralComplexity, Flux
# filename



class AudioFeatures:
    """Extract per-file spectral statistics from audio files.

    For each file a magnitude STFT is computed (optionally restricted to a
    frequency band of interest), frame-level descriptors are evaluated
    (pitch salience, spectral complexity, spectral flatness, spectral flux),
    and each descriptor is reduced to one scalar per file by an
    energy-weighted or plain mean over frames.
    """

    def __init__(
        self,
        fs=16000,
        win_size=2048,
        hop_size=256,
        lowBoundary=50,
        highBoundary=4000,
        default_avg="energy",
        ):
        """
        Args:
            fs: Expected sample rate of the input audio (Hz).
            win_size: STFT window length / FFT size in samples.
            hop_size: STFT hop length in samples.
            lowBoundary: Lower edge (Hz) of the frequency band of interest.
            highBoundary: Upper edge (Hz) of the frequency band of interest.
            default_avg: Frame-aggregation mode: "energy" (energy-weighted
                mean over frames) or "mean" (plain mean).

        Raises:
            ValueError: If ``default_avg`` is not a supported mode.
        """
        if default_avg not in ("energy", "mean"):
            raise ValueError(f"Unknown averaging mode: {default_avg!r}")
        self.fs = fs
        self.hop_size = hop_size
        self.win_size = win_size
        self.lowBoundary = lowBoundary
        self.highBoundary = highBoundary
        self.default_avg = default_avg
        self.init_stft()
        # Essentia frame-level extractors. PitchSalience receives the band
        # limits here, which is why pitch_salience() feeds it the full-band
        # spectrum rather than the pre-cropped one.
        self.PitchSal = PitchSalience(sampleRate=fs, lowBoundary=lowBoundary, highBoundary=highBoundary)
        self.SpecCmlx = SpectralComplexity(sampleRate=fs, magnitudeThreshold=0.01)
        self.Flux = Flux(halfRectify=False, norm="L2")

    def load(self, filename):
        """Load the waveform for ``filename`` via the project audio loader."""
        return read_audio(filename, multisrc=False, random_channel=False, pad_to=None, val=False)[0]

    def init_stft(self):
        """Create the torchaudio STFT transform used by :meth:`stft`."""
        self.stft_fn = Spectrogram(
            n_fft=self.win_size,
            hop_length=self.hop_size,
            win_length=self.win_size,
            power=None,  # complex output; magnitude is taken in stft()
            normalized=True,
            center=True,
            onesided=True,
        )

    def stft(self, wav):
        """Return the magnitude spectrogram of ``wav`` with shape (T, F)."""
        spectrogram = self.stft_fn(wav.unsqueeze(0)).squeeze(0).abs().numpy()
        # Transpose (F, T) -> (T, F) so iteration yields one frame at a time.
        return spectrogram.T

    def interest_stft(self, wav):
        """Return the (T, F_band) magnitude spectrogram restricted to
        [lowBoundary, highBoundary] Hz."""
        spec = self.stft(wav)
        freqs = np.fft.rfftfreq(self.win_size, d=1 / self.fs)
        band = np.where((freqs >= self.lowBoundary) & (freqs <= self.highBoundary))[0]
        return spec[:, band]

    def average(self, feats, specs):
        """Aggregate per-frame feature values into a single scalar.

        Args:
            feats: Sequence of per-frame feature values, length T.
            specs: (T, F) magnitude spectrogram aligned with ``feats``; used
                only to derive per-frame energies in "energy" mode.

        Returns:
            Scalar aggregate of ``feats``.

        Raises:
            ValueError: If ``self.default_avg`` is not a supported mode
                (previously this path fell through to a NameError).
        """
        assert len(feats) == len(specs), "Number of features and specs must match"
        feats = np.array(feats)
        if self.default_avg == "energy":
            # Weight each frame by its spectral energy; epsilon guards
            # against all-silent input.
            energy = np.sum(specs ** 2, axis=1)
            weights = energy / (np.sum(energy) + 1e-8)
        elif self.default_avg == "mean":
            weights = np.ones(len(feats)) / len(feats)
        else:
            raise ValueError(f"Unknown averaging mode: {self.default_avg!r}")
        return np.matmul(weights, feats)

    def _resolve_specs(self, filename, specs):
        """Return a validated band-limited (T, F) spectrogram.

        Prefers a caller-supplied ``specs`` so that callers which already
        computed :meth:`interest_stft` (e.g. :meth:`cal_all`) do not force a
        redundant reload and re-transform of the file; both paths produce
        numerically identical arrays.
        """
        assert filename is not None or specs is not None, "Either filename or specs must be provided"
        if specs is not None:
            assert isinstance(specs, np.ndarray), "Specs must be a numpy array"
            assert specs.ndim == 2, "Specs must be a 2D array (T, F)"
            return specs
        return self.interest_stft(self.load(filename))

    def pitch_salience(self, filename=None, specs=None):
        """Aggregated Essentia pitch salience for one file.

        Uses the full-band spectrum when a filename is given, because the
        PitchSalience extractor already applies the band limits internally.
        A caller-supplied ``specs`` is used only as a fallback when no
        filename is provided.
        """
        assert filename is not None or specs is not None, "Either filename or specs must be provided"
        if filename is not None:
            specs = self.stft(self.load(filename))
        else:
            assert isinstance(specs, np.ndarray), "Specs must be a numpy array"
            assert specs.ndim == 2, "Specs must be a 2D array (T, F)"
        pitch_salience = [self.PitchSal(frame) for frame in specs]
        return self.average(pitch_salience, specs)

    def spectral_complexity(self, filename=None, specs=None):
        """Aggregated Essentia spectral complexity over the band of interest."""
        specs = self._resolve_specs(filename, specs)
        complexity = [self.SpecCmlx(frame) for frame in specs]
        return self.average(complexity, specs)

    def spectral_flatness(self, filename=None, specs=None):
        """Aggregated spectral flatness (in dB) over the band of interest."""
        specs = self._resolve_specs(filename, specs)
        # librosa expects (F, T); power=2 evaluates flatness of the power spectrum.
        flatness = lb.feature.spectral_flatness(S=specs.T, power=2)
        # Convert to dB; epsilon guards log of zero.
        flatness = 10 * np.log10(flatness + 1e-10)
        return self.average(flatness.squeeze(), specs)

    def spectral_flux(self, filename=None, specs=None):
        """Aggregated spectral flux over the band of interest.

        The first frame is dropped from the aggregate because Essentia's
        Flux compares each frame with its predecessor, so the first value
        has no meaningful reference frame.
        """
        specs = self._resolve_specs(filename, specs)
        flux = [self.Flux(frame) for frame in specs]
        return self.average(flux[1:], specs[1:])

    def cal_all(self, tsv_file, data_folder, output_folder="./data_eng/statistics/output/"):
        """Compute all four features for every file listed in a TSV.

        Reads ``tsv_file`` (tab-separated, must contain a "filename" column),
        resolves each unique filename against ``data_folder``, and writes a
        TSV with one row per file to ``output_folder`` (created if missing).
        Missing or too-short files get NaN for every feature.
        """
        tsv = pd.read_csv(tsv_file, sep="\t")
        filenames = tsv["filename"].unique()
        new_df = pd.DataFrame(columns=["filename", "spectral_flatness", "pitch_salience", "spectral_complexity", "spectral_flux"])
        new_df["filename"] = filenames
        flatness = []
        salience = []
        complexity = []
        flux = []
        non_exist = 0
        for f in tqdm(new_df["filename"]):
            filename = os.path.join(data_folder, f)
            if not os.path.exists(filename):
                non_exist += 1
                # Report which file is missing (the message previously had
                # no placeholder for the path).
                print(f"File {filename} does not exist, not existing files: {non_exist}")
                flatness.append(np.nan)
                salience.append(np.nan)
                complexity.append(np.nan)
                flux.append(np.nan)
                continue
            wav = self.load(filename)
            if len(wav) < 8000:  # 0.5 s at fs=16000 — too short for stable statistics
                print(wav.shape)
                print(f"File {filename} is too short, skipping")
                flatness.append(np.nan)
                salience.append(np.nan)
                complexity.append(np.nan)
                flux.append(np.nan)
                continue
            # Compute the band-limited STFT once and share it across the
            # three band-limited features.
            specs = self.interest_stft(wav)
            pitch_salience = self.pitch_salience(filename, specs)
            spectral_flatness = self.spectral_flatness(filename, specs)
            spectral_complexity = self.spectral_complexity(filename, specs)
            spectral_flux = self.spectral_flux(filename, specs)
            flatness.append(spectral_flatness)
            salience.append(pitch_salience)
            complexity.append(spectral_complexity)
            flux.append(spectral_flux)
        new_df["spectral_flatness"] = flatness
        new_df["pitch_salience"] = salience
        new_df["spectral_complexity"] = complexity
        new_df["spectral_flux"] = flux
        # The output directory may not exist yet on a fresh checkout.
        os.makedirs(output_folder, exist_ok=True)
        out_path = os.path.join(output_folder, os.path.basename(tsv_file))
        new_df.to_csv(out_path, sep="\t", index=False)

if __name__ == "__main__":
    # Script entry point: batch feature extraction over (audio folder, TSV)
    # pairs. For in-domain DESED data, the pairs can instead be built from
    # the path_configs section of ./configs/dataset/desed.yaml
    # (yaml.safe_load) — see repository configs.
    audio_extractor = AudioFeatures(win_size=2048, hop_size=256, default_avg="energy")

    # Out-of-domain data: each entry is (audio folder, TSV listing files in
    # a "filename" column).
    folder_tsv_pairs = [
        ("/nvmework1/shaonian/Datasets/AudioSet_extracted/", "./configs/dataset/desed_tsv/train_unlabeled_od_AudioSet_label_mapping.tsv"),
        ("/nvmework1/shaonian/Datasets/AudioSet_extracted/", "./configs/dataset/desed_tsv/train_unlabeled_od_AudioSetStrong_label_mapping.tsv"),
    ]

    output_folder = "./data_eng/statistics/output/"
    for folder, tsv_file in folder_tsv_pairs:
        print(f"Processing {tsv_file} in {folder}")
        audio_extractor.cal_all(tsv_file, folder)
        # Mirror the output path convention used by cal_all's default.
        out_path = os.path.join(output_folder, os.path.basename(tsv_file))
        print(f"Finished processing {tsv_file}, results saved to {out_path}")