from src.encoder import *
from src.decoder import *
from torch import nn


"""
统一在这里构建细分模型，主要是编码器和解码器
"""


def build_encoder(config):
    """Build an encoder module selected by ``config.enc.type``.

    Supported types: ``'lstm'``, ``'cov1d_lstm'``, ``'deep_speech'``,
    ``'lstm_seq'``, ``'wavenet'``, ``'transformer'``,
    ``'transformer_torch'``, ``'transformer_torch_without_pos_encoding'``.

    Args:
        config: configuration namespace providing ``feature_dim``,
            ``dropout`` and an ``enc`` sub-namespace with the
            encoder-specific hyper-parameters read below.

    Returns:
        The constructed encoder ``nn.Module``.

    Raises:
        NotImplementedError: if ``config.enc.type`` is not recognized.
    """
    def _with_default(value, default):
        # BUG FIX: the previous `value if value else default` form silently
        # replaced legitimate falsy settings (e.g. input_sorted=False,
        # lookahead_context=0) with the default. Only a missing/None value
        # should fall back to the default.
        return default if value is None else value

    def _int_tuple(spec):
        # Parse a comma-separated string like "3,5" into a tuple of ints
        # (used for CNN kernel/stride specifications).
        return tuple(int(i) for i in spec.split(","))

    enc = config.enc
    if enc.type == 'lstm':
        return BaseEncoder(
            input_size=config.feature_dim,
            hidden_size=enc.hidden_size,
            output_size=enc.output_size,
            n_layers=enc.n_layers,
            dropout=config.dropout,
            bidirectional=enc.bidirectional
        )
    elif enc.type == 'cov1d_lstm':
        # NOTE: 'kernal_size' is the (misspelled) parameter name declared by
        # CNN_LSTM itself, so it must be kept as-is here.
        return CNN_LSTM(
            input_size=config.feature_dim,
            kernal_size=enc.cnn_kernal_size,
            pad=enc.cnn_pad,
            rnn_input_size=enc.rnn_input_size,
            rnn_hidden_size=enc.rnn_hidden_size,
            output_size=enc.output_size,
            n_layers=enc.n_layers,
            dropout=config.dropout,
            bidirectional=enc.bidirectional
        )
    elif enc.type == 'deep_speech':
        return DeepSpeech(
            input_size=config.feature_dim,
            rnn_hidden_size=enc.hidden_size,
            rnn_hidden_layers=enc.n_layers,
            output_size=enc.output_size,
            cnn1_ksize=_int_tuple(enc.cnn1_ksize),
            cnn1_stride=_int_tuple(enc.k1_stride),
            cnn2_ksize=_int_tuple(enc.cnn2_ksize),
            cnn2_stride=_int_tuple(enc.k2_stride),
            bidirectional=_with_default(enc.bidirectional, False),
            input_sorted=_with_default(enc.input_sorted, True),
            lookahead_context=_with_default(enc.lookahead_context, 3)
        )
    elif enc.type == 'lstm_seq':
        return Seq2SeqEncoder(
            input_size=config.feature_dim,
            hidden_size=enc.hidden_size,
            n_layers=enc.n_layers,
            dropout=config.dropout,
            bidirectional=enc.bidirectional
        )
    elif enc.type == 'wavenet':
        return WaveNetEncoder(
            input_size=config.feature_dim,
            out_size=enc.output_size,
            n_layers=enc.n_layers,
            residual_size=_with_default(enc.residual_size, 32),
            skip_size=_with_default(enc.skip_size, 128),
            causal_stride=_with_default(enc.causal_stride, 1),
            causal_kernel_size=_with_default(enc.causal_kernel_size, 3),
            dilated_kernel_size=_with_default(enc.dilated_kernel_size, 3),
            final_stride=_with_default(enc.final_stride, 1),
            dilation_depth=_with_default(enc.dilation_depth, 3)
        )
    elif enc.type == 'transformer':
        return TransformerEncoder(
            feature_dim=config.feature_dim,
            key_size=enc.key_size,
            query_size=enc.query_size,
            value_size=enc.value_size,
            num_hiddens=enc.hidden_size,
            norm_shape=enc.norm_shape,
            ffn_num_input=enc.ffn_num_input,
            ffn_num_hiddens=enc.ffn_num_hiddens,
            num_heads=enc.num_heads,
            num_layers=enc.n_layers,
            dropout=config.dropout
        )
    elif enc.type == 'transformer_torch':
        return TransformerEncoder_torch(
            feature_dim=config.feature_dim,
            num_hiddens=enc.hidden_size,
            num_heads=enc.num_heads,
            num_layers=enc.n_layers,
            dropout=config.dropout
        )
    elif enc.type == 'transformer_torch_without_pos_encoding':
        return TransformerEncoder_torch_without_pos_encoding(
            feature_dim=config.feature_dim,
            num_hiddens=enc.hidden_size,
            num_heads=enc.num_heads,
            num_layers=enc.n_layers,
            dropout=config.dropout
        )
    else:
        raise NotImplementedError


def build_decoder(config):
    """Build a decoder module selected by ``config.dec.type``.

    Supported types: ``'lstm'``, ``'lstm_seq'``, ``'transformer'``,
    ``'transformer_torch'``.

    Args:
        config: configuration namespace providing ``vocab_size``,
            ``dropout``, a ``dec`` sub-namespace with the decoder
            hyper-parameters, and (for attention decoders) the matching
            ``enc`` sub-namespace.

    Returns:
        The constructed decoder ``nn.Module``.

    Raises:
        NotImplementedError: if ``config.dec.type`` is not recognized.
    """
    dec = config.dec
    if dec.type == 'lstm':
        # embed_size deliberately mirrors hidden_size for this decoder.
        return BaseDecoder(
            embed_size=dec.hidden_size,
            vocab_size=config.vocab_size,
            hidden_size=dec.hidden_size,
            output_size=dec.output_size,
            n_layers=dec.n_layers,
            dropout=config.dropout,
            share_weight=config.share_weight
        )
    elif dec.type == 'lstm_seq':
        # LAS-style attention decoder needs the encoder hidden size to
        # project encoder states into the attention space.
        return LASLstmDecoder(
            embed_size=dec.embed_size,
            vocab_size=config.vocab_size,
            encoder_hidden_size=config.enc.hidden_size,
            decoder_hidden_size=dec.hidden_size,
            n_layers=dec.n_layers,
            dropout=config.dropout
        )
    elif dec.type == 'transformer':
        return TransformerDecoder(
            vocab_size=config.vocab_size,
            key_size=dec.key_size,
            query_size=dec.query_size,
            value_size=dec.value_size,
            num_hiddens=dec.hidden_size,
            norm_shape=dec.norm_shape,
            ffn_num_input=dec.ffn_num_input,
            ffn_num_hiddens=dec.ffn_num_hiddens,
            num_heads=dec.num_heads,
            num_layers=dec.n_layers,
            dropout=config.dropout
        )
    elif dec.type == 'transformer_torch':
        # BUG FIX: this branch previously read num_heads/num_layers from
        # config.enc (copy-paste from the encoder builder), silently
        # ignoring any decoder settings. Prefer the dec.* values and fall
        # back to enc.* so existing configs that only define enc.* keep
        # working. num_hiddens stays tied to the encoder hidden size
        # because torch's Transformer requires matching d_model on both
        # sides -- TODO confirm against the project's configs.
        return TransformerDecoder_torch(
            vocab_size=config.vocab_size,
            num_hiddens=config.enc.hidden_size,
            num_heads=getattr(dec, 'num_heads', None) or config.enc.num_heads,
            num_layers=getattr(dec, 'n_layers', None) or config.enc.n_layers,
            dropout=config.dropout
        )
    else:
        raise NotImplementedError
