import os
import glob
import torch


import numpy as np
import pandas as pd

from pprint import pprint
from random import shuffle
from torch.nn import Module
from torchaudio.transforms import AmplitudeToDB, MelSpectrogram
from torch.utils.data import Dataset
from .dataio import read_audio, process_labels, TorchScaler

class Clamp(Module):
    """Element-wise clamp layer: squeezes every input value into [min, max]."""

    def __init__(self, min=0, max=1):
        super().__init__()
        self.min = min
        self.max = max

    def forward(self, x):
        # torch.clamp(x, min, max) is equivalent to x.clamp(min, max).
        return torch.clamp(x, min=self.min, max=self.max)

class SEDDataset(Dataset):
    """Base dataset for sound event detection (SED).

    Builds the default SED feature front-end (mel spectrogram -> power dB
    -> clamp to [-50, 80] -> per-instance min-max scaling to [0, 1]) and
    optionally chains a user-supplied augmentation module after it.
    Subclasses must populate ``self.samples`` and implement
    :meth:`get_samples` and :meth:`__getitem__`.
    """

    def __init__(
        self,
        data_folder=None,
        mel_config=None,
        encoder=None,
        pad_to=10,
        fs=16000,
        return_filename=False,
        random_channel=False,
        multisrc=False,
        aug_transform=None
    ):
        """
        Args:
            data_folder: root folder (or folders) holding the audio files.
            mel_config: kwargs forwarded to torchaudio ``MelSpectrogram``;
                ``None`` now falls back to torchaudio's defaults instead of
                crashing on ``MelSpectrogram(**None)``.
            encoder: label encoder exposing ``n_frames`` and ``labels``.
            pad_to: target clip length in seconds (``None`` disables padding).
            fs: sample rate in Hz, used to convert ``pad_to`` to samples.
            return_filename: if True, ``__getitem__`` also returns the path.
            random_channel: forwarded to ``read_audio``.
            multisrc: forwarded to ``read_audio``.
            aug_transform: optional ``torch.nn.Module`` applied after the
                SED front-end (defaults to identity).
        """
        self.encoder = encoder
        self.fs = fs
        self.data_folder = data_folder
        self.return_filename = return_filename
        self.random_channel = random_channel
        self.multisrc = multisrc
        # Pad length in samples; None keeps variable-length clips.
        self.pad_to = pad_to * fs if pad_to is not None else None
        self.samples = None
        # Default: SED pre-process
        sed_transform = torch.nn.Sequential(
            MelSpectrogram(**(mel_config or {})),
            AmplitudeToDB(stype='power', top_db=80),
            Clamp(min=-50, max=80),
            TorchScaler("instance", "minmax", [0, 1]),
        )
        if aug_transform is None:
            aug_transform = torch.nn.Identity()
        else:
            # Fixed message: the argument being validated is aug_transform,
            # not "feat_transform" as the original text claimed.
            assert isinstance(aug_transform, torch.nn.Module), "aug_transform must be a torch.nn.Module"
        self.feat_transform = torch.nn.Sequential(
            sed_transform,
            aug_transform,
        )
        self.sed_transform = sed_transform
        self.aug_transform = aug_transform

    def get_samples(self):
        """Collect the sample index; must be implemented by subclasses."""
        # Raise instead of silently returning None so a missing override
        # fails loudly at the call site.
        raise NotImplementedError

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, item):
        """Fetch one example; must be implemented by subclasses."""
        raise NotImplementedError

class DomainAgnosticSet(SEDDataset):
    """Unlabeled dataset that mixes in-domain and out-of-domain clips.

    After trimming out-domain to an exact multiple of in-domain, samples
    are interleaved so every ``ratio + 1``-th position holds an in-domain
    clip, where ``ratio = len(out_domain) // len(in_domain)``.
    """

    def __init__(self, dataset_info: dict, **kwargs):
        """
        Args:
            dataset_info: dict with exactly the keys "in_domain" and
                "out_domain", each mapping to a dict with "folder" and an
                optional "tsv_file" entry.
        """
        super().__init__(**kwargs)
        self.dataset_info = dataset_info
        self.all_samples = {}
        assert list(dataset_info.keys()) == ["in_domain", "out_domain"], "wrong data info"
        for key, info in dataset_info.items():
            if info.get("tsv_file") is not None:
                samples = self.get_samples(info["folder"], info["tsv_file"])
            else:
                samples = self.get_samples(info["folder"])
            self.all_samples[key] = samples
        shuffle(self.all_samples["in_domain"])
        shuffle(self.all_samples["out_domain"])
        # Assume id samples < od samples: trim out_domain to an exact
        # multiple of in_domain.
        res_audio = len(self.all_samples["out_domain"]) % len(self.all_samples["in_domain"])
        if res_audio:
            # BUG FIX: the unconditional slice [:-res_audio] with
            # res_audio == 0 is [:0] and emptied the whole out_domain list.
            self.all_samples["out_domain"] = self.all_samples["out_domain"][:-res_audio]
        ratio = len(self.all_samples["out_domain"]) // len(self.all_samples["in_domain"])
        print(
            "in_domain: {}, out_domain: {}, ratio: {}".format(
            len(self.all_samples["in_domain"]), 
            len(self.all_samples["out_domain"]), 
            ratio
            )
        )
        # Deterministic interleave: positions with i % (ratio + 1) == 0 take
        # the next in-domain clip; all others take out-domain clips in order.
        self.samples = []
        total_amount = len(self.all_samples["in_domain"]) + len(self.all_samples["out_domain"])
        for i in range(total_amount):
            if i % (ratio + 1) == 0:
                idx = i // (ratio + 1)
                self.samples.append(self.all_samples["in_domain"][idx])
            else:
                idx = i // (ratio + 1) * ratio + i % (ratio + 1) - 1
                self.samples.append(self.all_samples["out_domain"][idx])

        self.iod_folders = {
            "in_domain": dataset_info["in_domain"]["folder"],
            "out_domain": dataset_info["out_domain"]["folder"]
        }

    def get_samples(self, data_folder, tsv_file=None):
        """List wav paths from a tsv index, or by globbing the folder."""
        if tsv_file is not None:
            filenames = pd.read_csv(tsv_file, sep="\t")["filename"].tolist()
            samples = [os.path.join(data_folder, f) for f in filenames]
        else:
            samples = glob.glob(os.path.join(data_folder, "*.wav"))
        return samples

    def __getitem__(self, item):
        c_ex = self.samples[item]

        mixture, _, _, padded_indx = read_audio(
            c_ex, self.multisrc, self.random_channel, self.pad_to
        )
        # Unlabeled data: targets are an all-zero strong-label matrix.
        max_len_targets = self.encoder.n_frames
        strong = torch.zeros(max_len_targets, len(self.encoder.labels)).float()
        sed_feats = self.sed_transform(mixture)
        aug_feats = self.aug_transform(sed_feats)
        label = strong.transpose(0, 1)
        out_args = [sed_feats, aug_feats, label, padded_indx]

        if self.return_filename:
            out_args.append(c_ex)
        # Final flag marks whether the clip path lies in the in-domain folder.
        out_args.append(self.dataset_info["in_domain"]["folder"] in c_ex)
        return out_args

class WeakSet(SEDDataset):
    """Weakly-labeled dataset: clip-level event tags read from a DataFrame."""

    def __init__(self, tsv_file, val=False, cache_set=True, **kwargs):
        """
        Args:
            tsv_file: pandas DataFrame with ``filename`` and comma-separated
                ``event_labels`` columns.
            val: only affects the log line printed at construction time.
            cache_set: if True, decoded audio and labels are cached after
                the first pass over the dataset.
        """
        super().__init__(**kwargs)
        self.cache_samples = []
        self.tsv_file = tsv_file
        self.full_set = cache_set

        self.samples = self.get_samples()
        self.samples_list = list(self.samples.keys())

        print("supervised weak ({})".format("val" if val else "train"), len(self))

    def get_samples(self):
        """Build {filename: {"mixture": path, "events": [labels]}} from the DataFrame."""
        samples = {}
        for i, r in self.tsv_file.iterrows():
            if r["filename"] not in samples.keys():
                samples[r["filename"]] = {
                    "mixture": os.path.join(self.data_folder, r["filename"]),
                    "events": r["event_labels"].split(","),
                }
        return samples

    def __getitem__(self, item):
        # Resolve the sample up-front so `c_ex` is bound on both branches.
        # BUG FIX: previously a cache hit with return_filename=True raised
        # NameError because `c_ex` was only assigned on the cache-miss path.
        file = self.samples_list[item]
        c_ex = self.samples[file]
        if (len(self.cache_samples) < len(self)) or (not self.full_set):
            mixture, _, _, padded_indx = read_audio(
                c_ex["mixture"], self.multisrc, self.random_channel, self.pad_to
            )

            labels = c_ex["events"]
            max_len_targets = self.encoder.n_frames
            # Weak (clip-level) labels are stored in the first frame row only.
            weak = torch.zeros(max_len_targets, len(self.encoder.labels))
            if len(labels):
                weak_labels = self.encoder.encode_weak(labels)
                weak[0, :] = torch.from_numpy(weak_labels).float()
            self.cache_samples.append((mixture, weak, padded_indx))
        else:
            # NOTE(review): cache entries are appended in first-access order,
            # so indexing by `item` assumes the first epoch visits items
            # sequentially — confirm against the DataLoader configuration.
            mixture, weak, padded_indx = self.cache_samples[item]
        feat = self.feat_transform(mixture)
        label = weak.transpose(0, 1)
        out_args = [feat, label, padded_indx]

        if self.return_filename:
            out_args.append(c_ex["mixture"])

        return out_args


class StrongSet(SEDDataset):
    """Strongly-labeled dataset: per-event onset/offset annotations from a DataFrame."""

    def __init__(self, tsv_file, val=False, cache_set=True, **kwargs):
        """
        Args:
            tsv_file: pandas DataFrame with ``filename``, ``event_label``,
                ``onset`` and ``offset`` columns (onset may be NaN for
                event-free files).
            val: only affects the log line printed at construction time.
            cache_set: if True, decoded audio and labels are cached after
                the first pass over the dataset.
        """
        super().__init__(**kwargs)
        self.cache_samples = []
        self.full_set = cache_set
        self.tsv_file = tsv_file

        self.samples = self.get_samples()
        # Keep only files that actually have at least one event.
        self.samples = {k: v for k, v in self.samples.items() if v["events"]}
        self.samples_list = list(self.samples.keys())

        print("supervised strong ({})".format("val" if val else "train"), len(self))

    def get_samples(self):
        """Group rows by filename into {filename: {"mixture": path, "events": [...]}}.

        Rows with NaN onset mark event-free files and contribute no events.
        """
        samples = {}
        for i, r in self.tsv_file.iterrows():
            # setdefault replaces the original duplicated if/else arms that
            # appended the identical event dict in both branches.
            entry = samples.setdefault(
                r["filename"],
                {
                    "mixture": os.path.join(self.data_folder, r["filename"]),
                    "events": [],
                },
            )
            if not np.isnan(r["onset"]):
                entry["events"].append(
                    {
                        "event_label": r["event_label"],
                        "onset": r["onset"],
                        "offset": r["offset"],
                    }
                )
        return samples

    def __getitem__(self, item):
        # Resolve the sample up-front so `c_ex` is bound on both branches.
        # BUG FIX: previously a cache hit with return_filename=True raised
        # NameError because `c_ex` was only assigned on the cache-miss path.
        file = self.samples_list[item]
        c_ex = self.samples[file]
        if (len(self.cache_samples) < len(self)) or (not self.full_set):
            mixture, onset_s, offset_s, padded_indx = read_audio(
                c_ex["mixture"], self.multisrc, self.random_channel, self.pad_to
            )
            # labels
            labels = c_ex["events"]

            # to steps
            labels_df = pd.DataFrame(labels)
            labels_df = process_labels(labels_df, onset_s, offset_s)

            if not len(labels_df):
                max_len_targets = self.encoder.n_frames
                strong = torch.zeros(max_len_targets, len(self.encoder.labels)).float()
            else:
                strong = self.encoder.encode_strong_df(labels_df)
                strong = torch.from_numpy(strong).float()
            self.cache_samples.append((mixture, strong, padded_indx))
        else:
            # NOTE(review): cache entries are appended in first-access order,
            # so indexing by `item` assumes the first epoch visits items
            # sequentially — confirm against the DataLoader configuration.
            mixture, strong, padded_indx = self.cache_samples[item]
        feat = self.feat_transform(mixture)
        label = strong.transpose(0, 1)
        out_args = [feat, label, padded_indx]

        if self.return_filename:
            out_args.append(c_ex["mixture"])

        return out_args


class UnlabeledSet(SEDDataset):
    """Unlabeled dataset indexed either by a tsv file or by globbing folders."""

    def __init__(self, tsv_file=None, **kwargs):
        """
        Args:
            tsv_file: optional path to a tsv with a ``filename`` column;
                when given, ``data_folder`` must be a single folder string.
        """
        super().__init__(**kwargs)
        # (Removed the original no-op `self.data_folder = self.data_folder`.)
        if tsv_file is not None:
            self.tsv_file = tsv_file
            assert isinstance(self.data_folder, str), "data folder must be a string when using tsv_file"
            self.samples = self.get_samples(tsv_file)
        else:
            # Normalize to a list of folders before globbing.
            if isinstance(self.data_folder, str):
                self.data_folder = [self.data_folder]
            self.samples = self.get_samples()
        print("unlabeled (train)", len(self))

    def get_samples(self, tsv_file=None):
        """List wav paths from the tsv index, or by globbing every folder."""
        if tsv_file is not None:
            filenames = pd.read_csv(tsv_file, sep="\t")["filename"].tolist()
            samples = [os.path.join(self.data_folder, f) for f in filenames]
            return samples
        else:
            samples = []
            for folder in self.data_folder:
                samples += glob.glob(os.path.join(folder, "*.wav"))

        return samples

    def __getitem__(self, item):
        c_ex = self.samples[item]

        mixture, _, _, padded_indx = read_audio(
            c_ex, self.multisrc, self.random_channel, self.pad_to
        )

        # Unlabeled data: targets are an all-zero strong-label matrix.
        max_len_targets = self.encoder.n_frames
        strong = torch.zeros(max_len_targets, len(self.encoder.labels)).float()
        feat = self.feat_transform(mixture)
        # Some augmentation transforms presumably return a 3-element
        # (feat_a, feat_b, crop_pos) container — TODO confirm upstream.
        # BUG FIX: check the container type first; `len()` on a plain tensor
        # is its first dimension, so a tensor with a leading dim of 3 would
        # previously be unpacked as if it were that container.
        if isinstance(feat, (tuple, list)) and len(feat) == 3:
            crop_pos = feat[-1]
            feat = (feat[0], feat[1])
        else:
            crop_pos = None
        label = strong.transpose(0, 1)
        out_args = [feat, label, padded_indx]

        if self.return_filename:
            out_args.append(c_ex)

        if crop_pos is not None:
            out_args.append(crop_pos)
        return out_args