import os
import torch
import torch.nn as nn

import numpy as np
import pandas as pd
import soundfile as sf
from typing import *
from pytorch_lightning.utilities.rank_zero import rank_zero_info

from random import shuffle
from torchaudio.transforms import MelSpectrogram
from torch.utils.data import Dataset
from data_loader.utils.io import read_audio, process_labels
from data_loader.transforms.data_augmentation import RandomAugment

class SEDDataset(Dataset):
    """Base class for sound event detection (SED) datasets.

    Stores the configuration shared by all concrete SED datasets (label
    TSV, audio folder, label encoder, padding length, feature transforms)
    and builds the mel-spectrogram feature pipeline.  Subclasses must
    override :meth:`get_samples` and :meth:`__getitem__`.
    """

    # Sentinel meaning "no explicit data_augmentation argument".  A fresh
    # RandomAugment is then built per instance inside __init__.  Putting a
    # RandomAugment() instance directly in the signature (as before) creates
    # ONE module at class-definition time that is shared by every dataset —
    # the mutable-default-argument pitfall: any state inside the augmenter
    # would leak across datasets.  Passing None explicitly still disables
    # augmentation, so callers are unaffected.
    _DEFAULT_AUGMENT = object()

    def __init__(
        self,
        tsv_file=None,
        data_folder=None,
        mel_config=None,
        encoder=None,
        pad_to=10,
        fs=16000,
        random_channel=False,
        multisrc=False,
        contrastive_aug: Optional[nn.Module] = None,
        extra_transform: Optional[nn.Module] = None,
        dur_file=None,
        data_augmentation: Optional[nn.Module] = _DEFAULT_AUGMENT,
    ):
        """
        Args:
            tsv_file: path to a tab-separated label file; eagerly loaded
                into ``self.tsv_df`` when given.
            data_folder: root folder containing the audio files.
            mel_config: kwargs forwarded verbatim to
                ``torchaudio.transforms.MelSpectrogram``.
            encoder: label encoder exposing ``n_frames`` / ``labels``
                (used by subclasses).
            pad_to: clip length in seconds; stored in samples
                (``pad_to * fs``), or ``None`` to disable padding.
            fs: sampling rate in Hz.
            random_channel: forwarded to ``read_audio`` by subclasses.
            multisrc: forwarded to ``read_audio`` by subclasses.
            contrastive_aug: augmentation used by ``ContrastiveSet``.
            extra_transform: optional second feature transform appended
                after the mel pipeline.
            dur_file: optional duration file path (stored only).
            data_augmentation: optional augmentation module; when omitted,
                a fresh ``RandomAugment(aug_intensity=5, label_pool=4)`` is
                created for this instance.  Pass ``None`` to disable.
        """
        self.tsv_file = tsv_file
        self.tsv_df = pd.read_csv(tsv_file, sep="\t") if tsv_file is not None else None
        self.encoder = encoder
        self.fs = fs
        self.data_folder = data_folder
        self.random_channel = random_channel
        self.multisrc = multisrc
        self.pad_to = pad_to * fs if pad_to is not None else None
        self.samples = None
        self.dur_file = dur_file
        if data_augmentation is self._DEFAULT_AUGMENT:
            data_augmentation = RandomAugment(aug_intensity=5, label_pool=4)
        self.data_augmentation = data_augmentation
        # Default: SED pre-process (raw waveform -> mel spectrogram).
        sed_transform = torch.nn.Sequential(
            MelSpectrogram(**mel_config),
            # Amp2dB(),
            # Clamp(min=-50, max=80),
            # TorchScaler("instance", "minmax", [0, 1]),
        )
        if extra_transform is None:
            self.feat_transform = [sed_transform]
        else:
            self.feat_transform = [sed_transform, extra_transform]
        self.sed_transform = sed_transform
        self.contrastive_aug = contrastive_aug

    def get_samples(self):
        """Collect the sample index; must be overridden by subclasses."""
        return

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, item):
        """Fetch one item; must be overridden by subclasses."""
        return

class ContrastiveSet(SEDDataset):
    """Unlabeled audio set that yields two augmented views per clip for
    contrastive training.  Requires ``contrastive_aug`` to be set."""

    def __init__(self, tsv_file, **kwargs):
        super().__init__(**kwargs)
        samples = self.get_samples(self.data_folder, tsv_file)
        # Deterministic ordering: sort by file path.
        samples = sorted(samples)
        # Drop clips shorter than 1 second.  Filtering into a new list fixes
        # the original remove-while-iterating pattern, which silently skips
        # the element that follows every removed one.
        self.samples = [s for s in samples if sf.info(s).duration >= 1]
        rank_zero_info(f"Contrastive: {len(self)}")
        assert self.contrastive_aug is not None, "ContrastiveSet requires a contrastive augmentation transform"

    def get_samples(self, data_folder, tsv_file):
        """Resolve the audio paths listed in one or more TSV files.

        Args:
            data_folder: folder (or list of folders) containing the audio.
            tsv_file: TSV path (or list of paths) with a ``filename`` column,
                paired element-wise with ``data_folder``.

        Returns:
            List of joined paths for files that exist on disk; missing files
            are counted and reported, not raised.
        """
        assert type(data_folder) == type(tsv_file), "data_folder and tsv_file must be of the same type"
        if isinstance(data_folder, str):
            data_folder = [data_folder]
            tsv_file = [tsv_file]
        samples = []
        for folder, tsv in zip(data_folder, tsv_file):
            unfound = 0
            filenames = pd.read_csv(tsv, sep="\t")["filename"].tolist()
            for f in filenames:
                filename = os.path.join(folder, f)
                if os.path.exists(filename):
                    samples.append(filename)
                else:
                    unfound += 1
            print(f"Unfound samples in {tsv}: {unfound}")
        return samples

    def __getitem__(self, item):
        """Return (features, view_1, view_2, masks, params) for one clip."""
        c_ex = self.samples[item]

        mixture, _, _, padded_indx = read_audio(
            c_ex, self.multisrc, self.random_channel, self.pad_to
        )
        feat = [x(mixture) for x in self.feat_transform]
        # ATSTFrame contrastive augmentation, returns: two augmented views + mask indices.
        # NOTE(review): the augmented `feats` is discarded and the
        # pre-augmentation `feat` is returned instead — confirm intentional.
        view_1, view_2, feats, masks = self.contrastive_aug(feat)
        params = {
            "mixture": c_ex,
            "events": None,
            "padded_indx": padded_indx,
            "label_type": "contrastive",
        }
        return feat, view_1, view_2, masks, params
    
    
class WeakSet(SEDDataset):
    """Dataset with clip-level ("weak") labels: one multi-hot vector per clip."""

    def __init__(self, val=False, **kwargs):
        """
        Args:
            val: True when this is a validation split (only affects logging
                here; augmentation use is currently commented out).
        """
        super().__init__(**kwargs)
        self.samples = self.get_samples()
        self.samples_list = list(self.samples.keys())
        self.val = val
        self.samples_list.sort()
        rank_zero_info(f"supervised weak ({'val' if val else 'train'}): {len(self)}")

    def get_samples(self):
        """Index the TSV rows as filename -> {audio path, event label list}.

        Only the first row per filename is kept; ``event_labels`` is a
        comma-separated string split into a list.
        """
        samples = {}
        for _, r in self.tsv_df.iterrows():
            if r["filename"] not in samples:
                samples[r["filename"]] = {
                    "mixture": os.path.join(self.data_folder, r["filename"]),
                    "events": r["event_labels"].split(","),
                }
        return samples

    def __getitem__(self, item):
        """Return (features, weak label tensor, params) for one clip."""
        file = self.samples_list[item]
        c_ex = self.samples[file]
        mixture, _, _, padded_indx = read_audio(
            c_ex["mixture"], self.multisrc, self.random_channel, self.pad_to
        )
        labels = c_ex["events"]
        max_len_targets = self.encoder.n_frames
        # Weak labels are stored in frame 0 of a (frames, classes) tensor so
        # weak and strong targets share the same shape downstream.
        weak = torch.zeros(max_len_targets, len(self.encoder.labels))
        if len(labels):
            weak_labels = self.encoder.encode_weak(labels)
            weak[0, :] = torch.from_numpy(weak_labels).float()
        feat = [x(mixture) for x in self.feat_transform]
        label = weak.transpose(0, 1)
        # if self.data_augmentation is not None and not self.val:
        #     feat, label = self.data_augmentation(feat[0], label)
        #     feat = [feat]  # Ensure feat is a list
        params = {
            "mixture": c_ex["mixture"],
            "events": c_ex["events"],
            "padded_indx": padded_indx,
            "label_type": "weak",
        }
        return feat, label, params


class StrongSet(SEDDataset):
    """Dataset with frame-level ("strong") labels: onset/offset per event."""

    def __init__(self, val=False, **kwargs):
        """
        Args:
            val: True for the validation split; forwarded to ``read_audio``
                and used in logging.
        """
        super().__init__(**kwargs)
        self.cache_samples = []
        self.samples = self.get_samples()
        # Keep only files that have at least one labelled event.
        self.samples = {k: v for k, v in self.samples.items() if v["events"]}
        self.samples_list = list(self.samples.keys())
        self.val = val
        rank_zero_info(f"supervised strong ({'val' if val else 'train'}): {len(self)}")

    def get_samples(self):
        """Group the TSV rows by filename.

        Returns:
            dict mapping filename -> ``{"mixture": path, "events": [...]}``
            where each event is ``{"event_label", "onset", "offset"}``.
            Rows whose ``onset`` is NaN contribute no event (a file whose
            rows are all NaN keeps an empty event list).
        """
        samples = {}
        for _, r in self.tsv_df.iterrows():
            # setdefault creates the entry on first sight of a filename and
            # returns the existing one afterwards — this removes the
            # duplicated append logic of the original seen/unseen branches.
            entry = samples.setdefault(
                r["filename"],
                {
                    "mixture": os.path.join(self.data_folder, r["filename"]),
                    "events": [],
                },
            )
            if not np.isnan(r["onset"]):
                entry["events"].append(
                    {
                        "event_label": r["event_label"],
                        "onset": r["onset"],
                        "offset": r["offset"],
                    }
                )
        return samples

    def __getitem__(self, item):
        """Return (features, strong label tensor, params) for one clip."""
        file = self.samples_list[item]
        c_ex = self.samples[file]

        mixture, onset_s, offset_s, padded_indx = read_audio(
            c_ex["mixture"], self.multisrc, self.random_channel, self.pad_to, self.val
        )
        # Clip the event annotations to the (possibly shifted) audio window.
        labels = c_ex["events"]
        labels_df = pd.DataFrame(labels)
        labels_df = process_labels(labels_df, onset_s, offset_s)

        if not len(labels_df):
            # No event survived the clipping: all-zero target.
            max_len_targets = self.encoder.n_frames
            strong = torch.zeros(max_len_targets, len(self.encoder.labels)).float()
        else:
            strong = self.encoder.encode_strong_df(labels_df)
            strong = torch.from_numpy(strong).float()

        feat = [x(mixture) for x in self.feat_transform]
        label = strong.transpose(0, 1)
        # if self.data_augmentation is not None and not self.val:
        #     feat, label = self.data_augmentation(feat[0], label)
        #     feat = [feat]  # Ensure feat is a list

        # The split type is inferred from substrings of the file path.
        if "synthetic" in c_ex["mixture"]:
            label_type = "strong_synthetic"
        elif "real" in c_ex["mixture"]:
            label_type = "strong_real"
        elif "validation" in c_ex["mixture"]:
            label_type = "strong_eval"
        else:
            raise ValueError("mixture type not recognized, must be 'synthetic' or 'real'")
        params = {
            "mixture": c_ex["mixture"],
            "events": c_ex["events"],
            "padded_indx": padded_indx,
            "label_type": label_type,
        }
        return feat, label, params

class UnlabeledSet(SEDDataset):
    """Unlabeled audio set; yields features with an all-zero strong target."""

    def __init__(self, tsv_file, **kwargs):
        super().__init__(**kwargs)
        samples = self.get_samples(self.data_folder, tsv_file)
        # Shuffle once at construction time.
        shuffle(samples)
        # Drop clips shorter than 1 second.  Filtering into a new list fixes
        # the original remove-while-iterating pattern, which silently skips
        # the element that follows every removed one.
        self.samples = [s for s in samples if sf.info(s).duration >= 1]
        rank_zero_info(f"Unlabeled: {len(self)}")

    def get_samples(self, data_folder, tsv_file):
        """Resolve the audio paths listed in one or more TSV files.

        Args:
            data_folder: folder (or list of folders) containing the audio.
            tsv_file: TSV path (or list of paths) with a ``filename`` column,
                paired element-wise with ``data_folder``.

        Returns:
            List of joined paths for files that exist on disk; missing files
            are counted and reported, not raised.
        """
        assert type(data_folder) == type(tsv_file), "data_folder and tsv_file must be of the same type"
        if isinstance(data_folder, str):
            data_folder = [data_folder]
            tsv_file = [tsv_file]
        samples = []
        for folder, tsv in zip(data_folder, tsv_file):
            unfound = 0
            filenames = pd.read_csv(tsv, sep="\t")["filename"].tolist()
            for f in filenames:
                filename = os.path.join(folder, f)
                if os.path.exists(filename):
                    samples.append(filename)
                else:
                    unfound += 1
            print(f"Unfound samples in {tsv}: {unfound}")
        return samples

    def __getitem__(self, item):
        """Return (features, all-zero label tensor, params) for one clip."""
        c_ex = self.samples[item]

        mixture, _, _, padded_indx = read_audio(
            c_ex, self.multisrc, self.random_channel, self.pad_to
        )

        # Unlabeled clips get an all-zero strong target with the same shape
        # the labelled datasets produce.
        max_len_targets = self.encoder.n_frames
        strong = torch.zeros(max_len_targets, len(self.encoder.labels)).float()
        feat = [x(mixture) for x in self.feat_transform]
        if len(feat) == 2:
            # With an extra transform, features are returned as a tuple pair.
            feat = (feat[0], feat[1])
        label = strong.transpose(0, 1)
        # if self.data_augmentation is not None:
        #     feat, label = self.data_augmentation(feat[0], label)
        #     feat = [feat]  # Ensure feat is a list
        params = {
            "mixture": c_ex,
            "events": None,
            "padded_indx": padded_indx,
            "label_type": "unlabeled",
        }
        return feat, label, params