import os

import pandas as pd

from torch.utils.data import ConcatDataset, DataLoader
from .class_dict import classes_labels
from .label_encoder import ManyHotEncoder
from .datasets import WeakSet, StrongSet, DomainAgnosticSet
from .sampler import ConcatDatasetSampler
from .domain_agnostic_collate import collate_fn
from functools import partial

def get_datasets(path_configs, feat_configs, train_set_index, aug_transform=False, selfsl_usage=None):
    """Build the label encoder and the train/val/test dataset collections.

    Args:
        path_configs: dict of TSV paths and audio folders (keys such as
            "test_tsv", "synth_val_tsv", "weak_tsv", "strong_train_tsv", ...).
        feat_configs: dict of feature-extraction settings (n_fft, hop_length,
            n_mels, sample_rate, audio_max_len, weak_split, seed, ...).
        train_set_index: sequence of truthy flags selecting which train
            subsets to build, in order [strong, synth, weak, unsupervised].
        aug_transform: augmentation transform passed to a train subset when
            the corresponding ``selfsl_usage`` flag is set; ``None``/``False``
            disables augmentation for that subset.
        selfsl_usage: sequence of per-subset flags enabling ``aug_transform``
            for [strong, synth, weak, unsupervised]. Defaults to all off.

    Returns:
        (label_encoder, train_set_list, val_set_list, test_set) where
        train_set_list is ordered [synth, strong, weak, unsup_in, unsup_out]
        (only the selected subsets are present).
    """
    # Guard: the previous default (None) raised TypeError on indexing below
    # whenever any train subset was selected. Default = no self-sup aug.
    if selfsl_usage is None:
        selfsl_usage = [0, 0, 0, 0, 0]

    # Many-hot encoder mapping class labels to frame-level targets.
    label_encoder = ManyHotEncoder(
        list(classes_labels.keys()),
        audio_len=feat_configs["audio_max_len"],
        frame_len=feat_configs["n_fft"],
        frame_hop=feat_configs["hop_length"],
        net_pooling=feat_configs["net_pooling"],
        fs=feat_configs["sample_rate"],
    )

    # Log-mel spectrogram configuration shared by every dataset.
    mel_config = {
        "n_fft": feat_configs["n_fft"],
        "win_length": feat_configs["win_length"],
        "hop_length": feat_configs["hop_length"],
        "n_mels": feat_configs["n_mels"],
        "sample_rate": feat_configs["sample_rate"],
        "f_min": feat_configs["f_min"],
        "f_max": feat_configs["f_max"],
    }

    # Test dataset (strongly labeled, evaluation mode).
    devtest_df = pd.read_csv(path_configs["test_tsv"], sep="\t")
    devtest_set = StrongSet(
        data_folder=path_configs["test_folder"],
        encoder=label_encoder,
        tsv_file=devtest_df,
        return_filename=True,
        pad_to=feat_configs["audio_max_len"],
        mel_config=mel_config,
        val=True,
    )

    # Synthetic validation dataset.
    synth_val_df = pd.read_csv(path_configs["synth_val_tsv"], sep="\t")
    synth_val_set = StrongSet(
        data_folder=path_configs["synth_val_folder"],
        encoder=label_encoder,
        tsv_file=synth_val_df,
        pad_to=feat_configs["audio_max_len"],
        return_filename=True,
        mel_config=mel_config,
        val=True,
    )

    # Split the weak set into train/val by sampling a fraction with a fixed
    # seed so the split is reproducible across runs.
    weak_df = pd.read_csv(path_configs["weak_tsv"], sep="\t")
    train_weak_df = weak_df.sample(
        frac=feat_configs["weak_split"],
        random_state=feat_configs["seed"],
    )
    valid_weak_df = weak_df.drop(train_weak_df.index).reset_index(drop=True)
    weak_val_set = WeakSet(
        data_folder=path_configs["weak_folder"],
        encoder=label_encoder,
        tsv_file=valid_weak_df,
        pad_to=feat_configs["audio_max_len"],
        return_filename=True,
        mel_config=mel_config,
        val=True,
    )

    # Strongly labeled (real-recording) validation dataset.
    strong_val_df = pd.read_csv(path_configs["strong_val_tsv"], sep="\t")
    strong_val_set = StrongSet(
        data_folder=path_configs["strong_folder"],
        encoder=label_encoder,
        tsv_file=strong_val_df,
        pad_to=feat_configs["audio_max_len"],
        return_filename=True,
        mel_config=mel_config,
        val=True,
    )

    # Train subsets: each is wrapped in a one-element list so that the final
    # concatenation below simply skips deselected (empty) subsets.
    weak_train_set = []
    synth_train_set = []
    strong_train_set = []
    unsup_in_set = []
    unsup_out_set = []

    if train_set_index[0]:
        strong_train_df = pd.read_csv(path_configs["strong_train_tsv"], sep="\t")
        strong_train_set = [StrongSet(
            data_folder=path_configs["strong_folder"],
            encoder=label_encoder,
            tsv_file=strong_train_df,
            pad_to=feat_configs["audio_max_len"],
            return_filename=True,
            mel_config=mel_config,
            aug_transform=aug_transform if selfsl_usage[0] else None,
        )]

    if train_set_index[1]:
        synth_train_df = pd.read_csv(path_configs["synth_tsv"], sep="\t")
        synth_train_set = [StrongSet(
            data_folder=path_configs["synth_folder"],
            encoder=label_encoder,
            tsv_file=synth_train_df,
            pad_to=feat_configs["audio_max_len"],
            return_filename=True,
            mel_config=mel_config,
            aug_transform=aug_transform if selfsl_usage[1] else None,
        )]

    if train_set_index[2]:
        train_weak_df = train_weak_df.reset_index(drop=True)
        weak_train_set = [WeakSet(
            data_folder=path_configs["weak_folder"],
            encoder=label_encoder,
            tsv_file=train_weak_df,
            pad_to=feat_configs["audio_max_len"],
            return_filename=True,
            mel_config=mel_config,
            aug_transform=aug_transform if selfsl_usage[2] else None,
        )]

    if train_set_index[3]:
        unsup_in_set = [
            DomainAgnosticSet(
                dataset_info={
                    "in_domain": {
                        "folder": path_configs["unlabeled_in_folder"],
                    },
                    "out_domain": {
                        "folder": path_configs["od_root"],
                        "tsv_file": path_configs["od_unlabeled_tsv"],
                    },
                },
                encoder=label_encoder,
                pad_to=feat_configs["audio_max_len"],
                return_filename=True,
                mel_config=mel_config,
                aug_transform=aug_transform if selfsl_usage[3] else None,
            )]

    # Order matters: the sampler and collate_fn assume this concatenation
    # order (synth, strong, weak, unsupervised).
    train_set_list = synth_train_set + strong_train_set + weak_train_set + unsup_in_set + unsup_out_set
    val_set_list = [synth_val_set, strong_val_set, weak_val_set]
    test_set = devtest_set
    return label_encoder, train_set_list, val_set_list, test_set


def get_dataloaders(batch_sizes, path_configs, feat_configs, val_batch_size, n_workers, aug_transform=None, selfsl_usage=(0, 0, 0, 0, 0)):
    """Create the train/val/test DataLoaders for the SED task.

    Args:
        batch_sizes: list of 4 per-subset batch sizes in order
            [strong, synth, weak, unsupervised]; a 0 entry disables that subset.
        path_configs: dict of TSV paths and audio folders (see get_datasets).
        feat_configs: dict of feature-extraction settings (see get_datasets).
        val_batch_size: batch size for the validation and test loaders.
        n_workers: number of DataLoader worker processes.
        aug_transform: optional augmentation transform forwarded to
            get_datasets.
        selfsl_usage: per-subset flags enabling aug_transform; immutable
            tuple default avoids the shared-mutable-default pitfall.

    Returns:
        (label_encoder, train_loader, val_loader, test_loader)

    Raises:
        ValueError: if ``batch_sizes`` does not have exactly 4 entries.
    """
    # Explicit raise instead of assert: asserts are stripped under `python -O`.
    if len(batch_sizes) != 4:
        raise ValueError("wrong batch size list, must with length of 4")

    # mode selects which labeled subset's length defines an epoch
    # (index of the last enabled labeled subset).
    mode = sum(x > 0 for x in batch_sizes[:3]) - 1
    # id_len: total labeled (strong + synth + weak) batch size; the collate
    # function uses it to split labeled from unlabeled samples in a batch.
    id_len = sum(batch_sizes[:3])
    train_set_index = [x > 0 for x in batch_sizes]
    batch_sizes = [x for x in batch_sizes if x > 0]

    label_encoder, train_set_list, val_set_list, test_set = get_datasets(
        path_configs,
        feat_configs,
        train_set_index,
        aug_transform=aug_transform,
        selfsl_usage=selfsl_usage,
    )

    # Sampler draws a fixed number of items from each concatenated subset per
    # step; `mode` determines which subset's exhaustion ends the epoch.
    train_sampler = ConcatDatasetSampler(train_set_list, batch_sizes, shuffle=False, mode=mode)

    # Concatenate the per-subset datasets into single map-style datasets.
    train_set = ConcatDataset(train_set_list)
    val_set = ConcatDataset(val_set_list)

    # Build the dataloaders; the train loader's batch size comes from the
    # sampler (sum of per-subset batch sizes).
    train_loader = DataLoader(
        train_set,
        sampler=train_sampler,
        collate_fn=partial(collate_fn, id_len=id_len),
        batch_size=train_sampler.get_bsz(),
        num_workers=n_workers,
    )
    val_loader = DataLoader(val_set, batch_size=val_batch_size, shuffle=False, num_workers=n_workers)
    test_loader = DataLoader(test_set, batch_size=val_batch_size, shuffle=False, num_workers=n_workers)

    return label_encoder, train_loader, val_loader, test_loader