import logging

import torch.nn as nn
import yaml

from decoder import Decoder
from joiner import Joiner
from model import AsrModel
from scaling import ScheduledFloat
from subsampling import Conv2dSubsampling

from icefall.checkpoint import (
    average_checkpoints,
    average_checkpoints_with_averaged_model,
    find_checkpoints,
    load_checkpoint,
)

from zipformer import Zipformer2

from icefall.utils import (
    AttributeDict,
)
from fenlei_module.module_fenlei import ClassifierModel

def _to_int_tuple(s: str):
    return tuple(map(int, s.split(",")))

def get_encoder_embed(params: AttributeDict) -> nn.Module:
    """Build the convolutional front-end that subsamples and embeds features.

    It maps input of shape (N, T, num_features) to
    (N, (T - 7) // 2, encoder_dims), i.e. it does two things at once:
      (1) subsampling: T -> (T - 7) // 2
      (2) embedding: num_features -> encoder_dims
    In the normal configuration the encoder downsamples once more (by a
    factor of 2) at its output, so most of the encoder stacks run at a
    lower frame rate.
    """
    first_stack_dim = _to_int_tuple(params.encoder_dim)[0]
    return Conv2dSubsampling(
        in_channels=params.feature_dim,
        out_channels=first_stack_dim,
        dropout=ScheduledFloat((0.0, 0.3), (20000.0, 0.1)),
    )


def get_encoder_model(params: AttributeDict) -> nn.Module:
    """Build the Zipformer2 encoder from the hyper-parameters in ``params``.

    Per-stack settings (layer counts, dims, head dims, ...) come from the
    config as comma-separated strings and are parsed into int tuples.
    """
    encoder_kwargs = dict(
        output_downsampling_factor=2,
        downsampling_factor=_to_int_tuple(params.downsampling_factor),
        num_encoder_layers=_to_int_tuple(params.num_encoder_layers),
        encoder_dim=_to_int_tuple(params.encoder_dim),
        encoder_unmasked_dim=_to_int_tuple(params.encoder_unmasked_dim),
        query_head_dim=_to_int_tuple(params.query_head_dim),
        pos_head_dim=_to_int_tuple(params.pos_head_dim),
        value_head_dim=_to_int_tuple(params.value_head_dim),
        pos_dim=params.pos_dim,
        num_heads=_to_int_tuple(params.num_heads),
        feedforward_dim=_to_int_tuple(params.feedforward_dim),
        cnn_module_kernel=_to_int_tuple(params.cnn_module_kernel),
        dropout=ScheduledFloat((0.0, 0.3), (20000.0, 0.1)),
        warmup_batches=4000.0,
        causal=params.causal,
        chunk_size=_to_int_tuple(params.chunk_size),
        left_context_frames=_to_int_tuple(params.left_context_frames),
    )
    return Zipformer2(**encoder_kwargs)


def get_decoder_model(params: AttributeDict) -> nn.Module:
    """Build the stateless transducer prediction network (decoder)."""
    decoder_kwargs = dict(
        vocab_size=params.vocab_size,
        decoder_dim=params.decoder_dim,
        blank_id=params.blank_id,
        context_size=params.context_size,
    )
    return Decoder(**decoder_kwargs)


def get_joiner_model(params: AttributeDict) -> nn.Module:
    """Build the transducer joiner network.

    The joiner consumes the widest of the per-stack encoder dimensions.
    """
    widest_encoder_dim = max(_to_int_tuple(params.encoder_dim))
    return Joiner(
        encoder_dim=widest_encoder_dim,
        decoder_dim=params.decoder_dim,
        joiner_dim=params.joiner_dim,
        vocab_size=params.vocab_size,
    )

def get_classifier_model(params: AttributeDict) -> nn.Module:
    """Build the auxiliary classification head from the config values."""
    classifier_args = (
        params.classifier_dim,
        params.classifier_num_classes,
        params.classifier_num_heads,
        params.classifier_num_layers,
    )
    return ClassifierModel(*classifier_args)


def init_model(params: AttributeDict) -> nn.Module:
    """Build the classifier-only ASR model described by ``params``.

    The Zipformer encoder and its Conv2d front-end are frozen
    (``requires_grad=False`` and eval mode) so only the classifier head
    trains; the decoder/joiner/CTC branches are disabled. If
    ``params.init_checkpoint_path`` is set, weights are loaded from it.

    :param params: hyper-parameter dict (AttributeDict) from the YAML config.
    :return: the assembled ``AsrModel``.
    """
    encoder_embed = get_encoder_embed(params)
    encoder = get_encoder_model(params)
    classifier = get_classifier_model(params)

    # Freeze the encoder: no gradients, and eval mode (disables dropout etc.).
    for param in encoder.parameters():
        param.requires_grad = False
    encoder.eval()
    # Freeze the subsampling front-end as well.
    for param in encoder_embed.parameters():
        param.requires_grad = False
    encoder_embed.eval()

    model = AsrModel(
        encoder_embed=encoder_embed,
        encoder=encoder,
        decoder=None,
        joiner=None,
        classifier=classifier,
        # Bug fix: max() over the raw comma-split strings compared them
        # lexicographically (e.g. max("768", "1024") == "768"), which could
        # select the wrong dimension. Convert to ints first, consistent
        # with get_joiner_model().
        encoder_dim=max(_to_int_tuple(params.encoder_dim)),
        decoder_dim=params.decoder_dim,
        vocab_size=params.vocab_size,
        use_ctc=False,
        use_transducer=False,
        use_classifier=True,
    )
    if params.init_checkpoint_path is not None:
        logging.info(f"Loading model from {params.init_checkpoint_path}")
        load_checkpoint(
            params.init_checkpoint_path, model=model
        )
        logging.info(f"Loaded model successfully from {params.init_checkpoint_path}")

    return model


def load_dict_from_yaml(file_path: str):
    """Read a YAML file and return its parsed contents (typically a dict).

    :param file_path: path to the YAML config file (read as UTF-8 text).
    :return: the deserialized YAML document.
    """
    with open(file_path, 'rt', encoding='utf-8') as stream:
        parsed = yaml.load(stream, Loader=yaml.FullLoader)
    return parsed

def print_model_size(model: nn.Module):
    """Print the model's total parameter count in M units (1024*1024).

    :param model: the module to measure; if None, only a notice is printed.
    :return: None
    """
    if model is None:
        print('model is None')
        # Bug fix: previously fell through and crashed with AttributeError
        # on ``None.parameters()``.
        return
    num_params = sum(p.numel() for p in model.parameters())
    print('the number of model params: {:,f}M'.format(num_params / 1024/ 1024),flush=True)

def print_model_trainable_size(model: nn.Module):
    """Print the model's trainable (requires_grad) parameter count in M units (1024*1024).

    :param model: the module to measure; if None, only a notice is printed.
    :return: None
    """
    if model is None:
        print('model is None')
        # Bug fix: previously fell through and crashed with AttributeError
        # on ``None.parameters()``.
        return
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('the number of model params: {:,f}M'.format(num_params / 1024/ 1024),flush=True)

def print_model_info(model: nn.Module):
    """Print the model structure, then overall / trainable / per-component sizes.

    The decoder, joiner and ctc_output sections print only their labels:
    those components are disabled in this setup, so their size reports
    are intentionally skipped.
    """
    sections = (
        ('model', lambda: print_model_size(model)),
        ('model trainable', lambda: print_model_trainable_size(model)),
        ('encoder', lambda: print_model_size(model.encoder)),
        ('encoder_embed', lambda: print_model_size(model.encoder_embed)),
        ('decoder', None),      # component disabled — label only
        ('joiner', None),       # component disabled — label only
        ('ctc_output', None),   # component disabled — label only
        ('classifier', lambda: print_model_size(model.classifier)),
    )
    print(model)
    for label, report in sections:
        print(label, flush=True)
        if report is not None:
            report()

if __name__ == "__main__":
    config = load_dict_from_yaml('conf/model2.yaml')
    param = AttributeDict(config)
    model = init_model(param)
    print_model_info(model)

