import torch
import torch.nn as nn
from typing import Optional
from torch.utils.data import ConcatDataset, DataLoader
from data_loader.transforms.data_augmentation import RandomAugment
from data_loader.utils.class_dict import classes_labels
from data_loader.utils.label_encoder import ManyHotEncoder
from data_loader.utils.sampler import ConcatDatasetSampler
from data_loader.utils.train_collate_func import collate_fn
from data_loader.utils.default_collate_func import default_collate_func
from data_loader.datasets.datasets import WeakSet, StrongSet, UnlabeledSet, ContrastiveSet
from functools import partial
import pytorch_lightning as pl

class SEDDataModule(pl.LightningDataModule):
    """Lightning data module for sound event detection (SED).

    Builds train/val/test/predict dataloaders from a mixture of strongly
    labeled (real), strongly labeled (synthetic), weakly labeled, and
    unlabeled (contrastive) audio datasets. Which training sources are
    active is controlled by ``batch_sizes``: a source with batch size 0 is
    skipped entirely.
    """

    def __init__(
        self,
        path_configs,
        feat_configs,
        batch_sizes,
        val_batch_size,
        n_workers,
        extra_transform: Optional[nn.Module] = None,
        contrastive_aug: Optional[nn.Module] = None,
        data_augmentation: Optional[nn.Module] = None,
    ):
        """Store configuration and build the label encoder / mel settings.

        Args:
            path_configs: dict of dataset folder / tsv / duration-file paths.
            feat_configs: dict of audio feature settings (sample rate, n_fft,
                hop length, mel bins, padding length, ...).
            batch_sizes: per-source training batch sizes in the order
                (strong real, synth, weak, unlabeled); 0 disables a source.
            val_batch_size: batch size for val/test/predict loaders.
            n_workers: DataLoader worker count.
            extra_transform: optional extra feature transform passed to every
                dataset.
            contrastive_aug: optional augmentation for the contrastive
                (unlabeled) set.
            data_augmentation: optional augmentation module; defaults to a
                fresh ``RandomAugment(aug_intensity=5, label_pool=4)``.
        """
        super().__init__()
        self.path_configs = path_configs
        self.feat_configs = feat_configs
        self.batch_sizes = batch_sizes
        self.val_batch_size = val_batch_size
        self.n_workers = n_workers
        self.contrastive_aug = contrastive_aug
        self.extra_transform = extra_transform
        # Fix for a stateful default argument: the old signature default
        # (RandomAugment(...) evaluated at import time) was a single instance
        # shared by every SEDDataModule. Build one per instance instead.
        self.data_augmentation = (
            data_augmentation
            if data_augmentation is not None
            else RandomAugment(aug_intensity=5, label_pool=4)
        )

        self.label_encoder = ManyHotEncoder(
            list(classes_labels.keys()),
            audio_len=feat_configs["audio_max_len"],
            frame_len=feat_configs["n_fft"],
            frame_hop=feat_configs["hop_length"],
            net_pooling=feat_configs["net_pooling"],
            fs=feat_configs["sample_rate"],
        )
        self.mel_config = {
            "n_fft": feat_configs["n_fft"],
            "win_length": feat_configs["win_length"],
            "hop_length": feat_configs["hop_length"],
            "n_mels": feat_configs["n_mels"],
            "sample_rate": feat_configs["sample_rate"],
            "f_min": feat_configs["f_min"],
            "f_max": feat_configs["f_max"],
            "power": feat_configs["power"],
            "window_fn": torch.hamming_window,
            "wkwargs": {"periodic": False},
        }

        # Loaders are created lazily in setup().
        self.train_loader = None
        self.val_loader = None
        self.test_loader = None
        self.predict_loader = None

    def setup(self, stage=None):
        """Instantiate datasets and wrap them in dataloaders.

        Training sources with a configured batch size of 0 are skipped.
        Validation always uses weak + synthetic-strong + real-strong +
        devtest sets concatenated together.

        Args:
            stage: Lightning stage hint ("fit"/"validate"/...); unused, all
                loaders are built regardless.
        """
        # A training source is active iff its configured batch size is > 0.
        train_set_index = [x > 0 for x in self.batch_sizes]
        batch_sizes = [x for x in self.batch_sizes if x > 0]
        # Sampler "mode" = number of active supervised sources (strong real,
        # synth, weak) minus one. NOTE(review): exact semantics are defined by
        # ConcatDatasetSampler — confirm against its implementation.
        mode = sum([x > 0 for x in self.batch_sizes[:3]]) - 1

        # Training sets (each wrapped in a one-element list so inactive
        # sources contribute nothing to the concatenation below).
        weak_train_set = []
        synth_train_set = []
        strong_train_set = []
        unsup_out_set = []

        if train_set_index[0]:
            strong_train_set = [StrongSet(
                data_folder=self.path_configs["strong_folder"],
                encoder=self.label_encoder,
                tsv_file=self.path_configs["train_strong_real_tsv"],
                pad_to=self.feat_configs["audio_max_len"],
                mel_config=self.mel_config,
                extra_transform=self.extra_transform
            )]
        if train_set_index[1]:
            synth_train_set = [StrongSet(
                data_folder=self.path_configs["train_synth_folder"],
                encoder=self.label_encoder,
                tsv_file=self.path_configs["train_strong_synth_tsv"],
                pad_to=self.feat_configs["audio_max_len"],
                mel_config=self.mel_config,
                extra_transform=self.extra_transform
            )]
        if train_set_index[2]:
            weak_train_set = [WeakSet(
                data_folder=self.path_configs["train_weak_folder"],
                encoder=self.label_encoder,
                tsv_file=self.path_configs["train_weak_tsv"],
                pad_to=self.feat_configs["audio_max_len"],
                mel_config=self.mel_config,
                extra_transform=self.extra_transform
            )]
        if train_set_index[3]:
            unsup_out_set = [ContrastiveSet(
                data_folder=self.path_configs["unlabeled_od_folder"],
                tsv_file=self.path_configs["unlabeled_od_tsv"],
                encoder=self.label_encoder,
                pad_to=self.feat_configs["audio_max_len"],
                mel_config=self.mel_config,
                contrastive_aug=self.contrastive_aug,
                extra_transform=self.extra_transform
            )]
        # Order must match the order of batch_sizes / train_set_index.
        train_set_list = strong_train_set + synth_train_set + weak_train_set + unsup_out_set

        # Validation sets.
        synth_val_set = StrongSet(
            data_folder=self.path_configs["val_strong_synth_folder"],
            encoder=self.label_encoder,
            tsv_file=self.path_configs["val_strong_synth_tsv"],
            pad_to=self.feat_configs["audio_max_len"],
            mel_config=self.mel_config,
            dur_file=self.path_configs["val_strong_synth_dur"],
            extra_transform=self.extra_transform,
            val=True
        )
        weak_val_set = WeakSet(
            data_folder=self.path_configs["train_weak_folder"],
            encoder=self.label_encoder,
            tsv_file=self.path_configs["val_weak_tsv"],
            pad_to=self.feat_configs["audio_max_len"],
            mel_config=self.mel_config,
            val=True,
            extra_transform=self.extra_transform
        )
        strong_val_set = StrongSet(
            data_folder=self.path_configs["strong_folder"],
            encoder=self.label_encoder,
            tsv_file=self.path_configs["val_strong_real_tsv"],
            pad_to=self.feat_configs["audio_max_len"],
            mel_config=self.mel_config,
            dur_file=self.path_configs["val_strong_real_dur"],
            extra_transform=self.extra_transform,
            val=True
        )
        devtest_set = StrongSet(
            data_folder=self.path_configs["test_folder"],
            encoder=self.label_encoder,
            tsv_file=self.path_configs["test_tsv"],
            pad_to=self.feat_configs["audio_max_len"],
            mel_config=self.mel_config,
            val=True,
            dur_file=self.path_configs["test_dur"],
            extra_transform=self.extra_transform
        )
        val_set_list = [weak_val_set, synth_val_set, strong_val_set, devtest_set]
        train_sampler = ConcatDatasetSampler(train_set_list, batch_sizes, shuffle=True, mode=mode)
        train_set = ConcatDataset(train_set_list)
        val_set = ConcatDataset(val_set_list)
        self.train_loader = DataLoader(
            train_set,
            sampler=train_sampler,
            # selfsl_batch is the contrastive batch size; 0 when the
            # unlabeled source is disabled.
            collate_fn=partial(collate_fn, selfsl_batch=batch_sizes[-1] if train_set_index[3] else 0),
            batch_size=train_sampler.get_bsz(),
            num_workers=self.n_workers
        )
        self.val_loader = DataLoader(
            val_set,
            batch_size=self.val_batch_size,
            shuffle=False,
            # NOTE(review): drop_last=True on a *validation* loader silently
            # discards the final partial batch — confirm this is intended.
            drop_last=True,
            num_workers=self.n_workers,
            collate_fn=default_collate_func
        )

        self.test_loader = DataLoader(
            devtest_set,
            batch_size=self.val_batch_size,
            shuffle=False,
            num_workers=self.n_workers,
            collate_fn=default_collate_func
        )

        # The predict set is optional: its paths may be absent from
        # path_configs or point at missing files. Best-effort only — but do
        # not use a bare except, which would also swallow KeyboardInterrupt
        # and SystemExit.
        try:
            predict_set = UnlabeledSet(
                data_folder=self.path_configs["pred_folder"],
                tsv_file=self.path_configs["pred_tsv"],
                encoder=self.label_encoder,
                pad_to=self.feat_configs["audio_max_len"],
                mel_config=self.mel_config,
                extra_transform=self.extra_transform
            )
            self.predict_loader = DataLoader(
                predict_set,
                batch_size=self.val_batch_size,
                shuffle=False,
                num_workers=self.n_workers,
                collate_fn=default_collate_func
            )
        except Exception:
            # Prediction data not configured/available; leave loader as None.
            pass

    def train_dataloader(self):
        """Return the training DataLoader built in setup()."""
        return self.train_loader

    def val_dataloader(self):
        """Return the validation DataLoader built in setup()."""
        return self.val_loader

    def test_dataloader(self):
        """Return the devtest DataLoader built in setup()."""
        return self.test_loader

    def predict_dataloader(self):
        """Return the predict DataLoader, or None if not configured."""
        return self.predict_loader