import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from gxl_ai_utils.utils import utils_file
import os

from wenet.transformer.encoder import TransformerEncoder
from wenet.transformer.swish import New_gelu4npu

def _make_speech_encoder() -> TransformerEncoder:
    """Build the shared speech Transformer encoder (2048 -> 2048, 8 blocks).

    The standalone encoder and the Sequential model below use identical
    hyperparameters; constructing both through this helper keeps the two
    configurations from drifting apart (previously duplicated verbatim).
    """
    return TransformerEncoder(
        input_size=2048,
        output_size=2048,
        attention_heads=8,
        linear_units=4096,
        num_blocks=8,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        attention_dropout_rate=0.1,
        input_layer="linear",
        pos_enc_layer_type="abs_pos",
        normalize_before=True,
    )


# Standalone encoder and a 2048 -> 4096 projection head.
speech_transformer = _make_speech_encoder()
speech_head = torch.nn.Linear(2048, 4096)

# NOTE(review): wenet's TransformerEncoder.forward takes (xs, xs_lens) and
# returns a tuple, so chaining it in nn.Sequential is probably not runnable
# end-to-end — here the model is only built to report parameter counts.
# Confirm before using this Sequential for actual forward passes.
speech_model = torch.nn.Sequential(
    _make_speech_encoder(),
    New_gelu4npu(),
    torch.nn.Linear(2048, 4096),
)


# Print the parameter size of each component.
utils_file.print_model_size(speech_transformer)
utils_file.print_model_size(speech_head)
utils_file.print_model_size(speech_model)