import torch
from torch.utils.data import DataLoader, RandomSampler, Subset, random_split
import numpy as np
from model import encoder_module
from data import loadwindpower, loadisdb, loadmonash, dataloader_collate, loaduea, loadbear
from experiment import exp_metrics

def model_factory(config, data):
    '''
    Build an encoder network for the configured experiment and move it to
    ``config.device``.

    :param config: experiment configuration namespace; reads ``exp_task``,
        ``exp_type``, ``exp_encoder``, ``ts_maxlen``, ``device`` and the
        encoder hyper-parameters.
    :param data: a Dataset instance exposing ``ts_dim`` and ``num_classes``,
        such as Monash_Regression_Dataset in data.loadmonash. ``data.ts_len``
        is used as a fallback when ``config.ts_maxlen`` is None.
    :return: an instantiated encoder module on ``config.device``.
    :raises ValueError: for an unknown task, experiment type or encoder.
    :raises AttributeError: if no maximum sequence length can be determined.
    '''
    task = config.exp_task
    exp_type = config.exp_type
    # np.int was removed in NumPy 1.24; the builtin int is the drop-in
    # replacement (np.int was always an alias for it).
    ts_dim = int(data.ts_dim)
    ts_maxlen = config.ts_maxlen
    if ts_maxlen is None:
        # Fall back to the dataset's own sequence length when the user did
        # not pin one via the script arguments.
        try:
            ts_maxlen = data.ts_len
        except AttributeError as x:
            print("Data class does not define a maximum sequence length, so it must be defined with the script argument `max_seq_len`")
            raise x
    num_classes = data.num_classes
    if exp_type == 'supervised':
        if (task == "classification") or (task == "regression"):
            if config.exp_encoder == 'transformer':
                return encoder_module.TIIEncoderNetwork_Transformer(
                    ts_dim=ts_dim,
                    ts_maxlen=ts_maxlen,
                    embedding_dim=config.embedding_dim,
                    num_encoding_heads=config.num_heads,
                    num_encoding_layers=config.num_layers,
                    dim_feedforward=config.dim_feedforward,
                    num_classes=num_classes,
                    config=config,
                    dropout=config.dropout,
                    pos_encoding=config.pos_encoding,
                    activation=config.transformer_activation,
                    norm=config.normalization_layer,
                ).to(config.device)
            elif config.exp_encoder == 'cnn':
                return encoder_module.CausalCNNEncoder(
                    in_channels=ts_dim,
                    channels=config.channels,
                    depth=config.depth,
                    reduced_size=config.reduced_size,
                    out_channels=num_classes,
                    kernel_size=config.kernel_size
                ).to(config.device)
            # Previously fell through and returned None, which crashed the
            # caller later with an opaque error; fail loudly instead.
            raise ValueError("exp_encoder '{}' does not exist".format(config.exp_encoder))
        else:
            raise ValueError("task '{}' does not exist".format(task))
    elif exp_type == 'pretrain':
        if (task == "classification") or (task == "regression"):
            if config.exp_encoder == 'transformer':
                # Transformer pretraining is not implemented.
                raise ValueError("exp_encoder '{}' does not exist".format(config.exp_encoder))
            elif config.exp_encoder == 'cnn':
                return encoder_module.CausalCNNEncoderUnsupervised(
                    in_channels=ts_dim,
                    channels=config.channels,
                    depth=config.depth,
                    reduced_size=config.reduced_size,
                    out_channels=num_classes,
                    kernel_size=config.kernel_size
                ).to(config.device)
            raise ValueError("exp_encoder '{}' does not exist".format(config.exp_encoder))
        else:
            raise ValueError("task '{}' does not exist".format(task))
    else:
        # Previously an unknown exp_type silently returned None.
        raise ValueError("exp_type '{}' does not exist".format(exp_type))


def optim_factory(model, args):
    """Create an Adam optimizer for ``model`` from the experiment arguments.

    When ``args.global_reg`` is true, L2 regularization (``args.l2_reg``) is
    applied globally through the optimizer's ``weight_decay``; otherwise no
    weight decay is used here.

    :param model: the ``nn.Module`` whose parameters are optimized.
    :param args: namespace with ``global_reg`` (bool), ``l2_reg`` (float)
        and ``exp_lr`` (learning rate).
    :return: a configured ``torch.optim.Adam`` instance.
    """
    # NOTE(review): the original also computed an `output_reg` value for the
    # non-global branch but never used or returned it; dropped as dead code.
    # If output-layer regularization is intended, it must be applied in the
    # training loop, not here.
    weight_decay = args.l2_reg if args.global_reg else 0
    return torch.optim.Adam(model.parameters(), lr=args.exp_lr, weight_decay=weight_decay)

def data_factory(config, flag):
    """Build the dataset and DataLoader for one split of an experiment.

    :param config: experiment configuration; reads ``data_path``, ``root_path``,
        ``data_name``, ``exp_batchsize``, ``exp_trainratio``, ``ts_maxlen`` and
        ``num_workers``.
    :param flag: one of ``'TRAIN'``, ``'VAL'`` or ``'TEST'``.
    :return: tuple ``(dataset, newds, dataloader)`` where ``newds`` is the
        (possibly sub-sampled) dataset actually wrapped by the loader.
    :raises ValueError: if ``flag`` is not one of the three accepted values.
    """
    # Map the configured data path to a data-source key, then to its Dataset class.
    source_dict = {
        'Monash/Monash_UEA_UCR_Regression_Archive': 'MONASH',
        'Isdb': 'ISDB',
        'UEA/Multivariate2018_ts': 'UEA',
        'BearingsDetection': 'BEAR'
    }
    data_source = source_dict[config.data_path]
    data_dict = {
        'ISDB': loadisdb.Dataset_isdb,
        'WIND': loadwindpower.Dataset_windpower,
        'MONASH': loadmonash.Monash_Regression_Dataset,
        'UEA': loaduea.Dataset_UEA,
        'BEAR': loadbear.Dataset_bearing
    }
    Data = data_dict[data_source]
    if flag == 'TRAIN':
        shuffle_flag = True
        drop_last = False
        batch_size = config.exp_batchsize
    elif flag == 'TEST':
        shuffle_flag = False
        drop_last = False
        batch_size = config.exp_batchsize
    elif flag == 'VAL':
        shuffle_flag = True
        drop_last = False
        batch_size = 1
    else:
        raise ValueError('Unknown flag, TRAIN or VAL or TEST is required')

    dataset = Data(root_path=config.root_path,
                   data_path=config.data_path,
                   data_name=config.data_name,
                   flag=flag,
                   config=config)

    ts_maxlen = config.ts_maxlen
    if ts_maxlen is None:
        # Fall back to the dataset's own sequence length when none was given.
        try:
            ts_maxlen = dataset.ts_len
        except AttributeError as x:
            print(
                "Data class does not define a maximum sequence length, so it must be defined with the script argument `max_seq_len`")
            raise x
    if flag == 'TRAIN':
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        num_train_samples = int(len(dataset) * config.exp_trainratio)
        newds, _ = random_split(dataset, lengths=[num_train_samples, len(dataset) - num_train_samples])
    else:
        # TEST and VAL use the full dataset. Previously only TEST assigned
        # `newds`, so a VAL flag reached the DataLoader with `newds` unbound
        # and raised NameError.
        newds = dataset
    dataloader = DataLoader(newds,
                            batch_size=batch_size,
                            shuffle=shuffle_flag,
                            num_workers=config.num_workers,
                            drop_last=drop_last,
                            collate_fn=lambda x: dataloader_collate.padding_attn_collate(x, max_len=ts_maxlen))
    return dataset, newds, dataloader




def pipeline_factor(config):
    """Placeholder for a future pipeline builder; currently does nothing.

    :param config: experiment configuration (ignored).
    :return: None.
    """
    return None
