#!/usr/bin/env python3

from wenet.models.moe_comformer.embedding import (NoPositionalEncoding,
                                           PositionalEncoding,
                                           RelPositionalEncoding,
                                           RelPositionalEncodingNew)
from wenet.models.moe_comformer.subsampling import (LinearNoSubsampling,
                                                    Conv2dSubsampling4,
                                                    Conv2dSubsampling6,
                                                    Conv2dSubsampling8, Conv2dSubsampling6_simple2, Conv2dSubsampling16)

# Registry mapping config-string names to positional-encoding classes.
# Each class is constructed as cls(output_size, dropout_rate) — see
# init_pure_embedder below.
pos_enc_map = {
    'abs_pos' : PositionalEncoding,
    'rel_pos' : RelPositionalEncoding,
    'rel_pos_new' : RelPositionalEncodingNew,
    'no_pos' : NoPositionalEncoding,
}

# Registry mapping config-string names to input subsampling classes.
# Each class is constructed as
# cls(input_size, output_size, dropout_rate, pos_enc, **kwargs) — see
# init_embedder below. The numeric suffix presumably indicates the
# subsampling rate (e.g. conv2d -> 4x) — TODO confirm against the
# subsampling module.
subsample_map = {
    'linear' : LinearNoSubsampling,
    'conv2d' : Conv2dSubsampling4,
    'conv2d6' : Conv2dSubsampling6,
    'conv2d6_simple2' : Conv2dSubsampling6_simple2,
    'conv2d8' : Conv2dSubsampling8,
    'conv2d16' : Conv2dSubsampling16,
}


def init_pure_embedder(pos_enc_layer_type, output_size, dropout_rate=0):
    """Instantiate a positional-encoding layer by its registry name.

    Args:
        pos_enc_layer_type: key into ``pos_enc_map`` (e.g. 'abs_pos').
        output_size: embedding dimension passed to the encoding class.
        dropout_rate: dropout probability passed to the encoding class.

    Raises:
        ValueError: if ``pos_enc_layer_type`` is not a known registry key.
    """
    pos_enc_class = pos_enc_map.get(pos_enc_layer_type)
    if pos_enc_class is None:
        raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)
    return pos_enc_class(output_size, dropout_rate)


def init_embedder(input_size: int,
                  output_size: int = 256,
                  input_layer: str = 'conv2d',
                  pos_enc_layer_type: str = 'abs_pos',
                  dropout_rate: float = 0.1,
                  **kwargs):
    """Build an input subsampling front-end with its positional encoding.

    Args:
        input_size: feature dimension of the raw input.
        output_size: model (embedding) dimension after subsampling.
        input_layer: key into ``subsample_map`` selecting the subsampler.
        pos_enc_layer_type: key into ``pos_enc_map`` selecting the
            positional encoding embedded inside the subsampler.
        dropout_rate: dropout probability for both subsampler and encoding.
        **kwargs: extra keyword arguments forwarded to the subsampler class.

    Raises:
        ValueError: if ``input_layer`` or ``pos_enc_layer_type`` is not a
            known registry key.
    """
    if input_layer not in subsample_map:
        raise ValueError("unknown input_layer: " + input_layer)
    subsample_class = subsample_map[input_layer]
    # The positional encoding is handed to the subsampler, which applies
    # it to its own output.
    pos_enc = init_pure_embedder(pos_enc_layer_type, output_size, dropout_rate)
    return subsample_class(
        input_size, output_size, dropout_rate, pos_enc, **kwargs)
