import logging
from collections import OrderedDict

import torch

from wenet.branchformer.encoder import BranchformerEncoder
from wenet.e_branchformer.encoder import EBranchformerEncoder
from wenet.efficient_conformer.encoder import EfficientConformerEncoder
from wenet.kd_model.kd_loss import KD_CTC
from wenet.kd_model.kd_model import KD_model
from wenet.kd_model.teacher_student_class import TeacherStudentModel
from wenet.sanm.encoder import SANMEncoder
from wenet.squeezeformer.encoder import SqueezeformerEncoder
from wenet.transformer.cmvn import GlobalCMVN
from wenet.transformer.encoder import TransformerEncoder, ConformerEncoder
from wenet.utils.checkpoint import load_checkpoint
from wenet.utils.cmvn import load_cmvn
from wenet.kd_model import kd_utils
from gxl_ai_utils.utils import utils_file


def _init_paraformer_cmvn(configs: dict):
    """Build the GlobalCMVN layer for the paraformer encoder, or None if no cmvn file is configured."""
    if configs['cmvn_file_paraformer'] is None:
        return None
    mean, istd = load_cmvn(configs['cmvn_file_paraformer'],
                           configs['is_json_cmvn'])
    return GlobalCMVN(
        torch.from_numpy(mean).float(),
        torch.from_numpy(istd).float())


def _init_paraformer_encoder(configs: dict):
    """Build the paraformer teacher encoder and load its pretrained weights.

    Raises:
        ValueError: if ``configs['paraformer_encoder']`` is not 'sanm'
            (the only supported type).
    """
    encoder_type = configs.get('paraformer_encoder', 'sanm')
    if encoder_type != 'sanm':
        # Fail fast instead of continuing with a None encoder, which would
        # crash later with an opaque AttributeError on output_size().
        raise ValueError(
            'not support paraformer_encoder_type: %s' % encoder_type)
    encoder = SANMEncoder(
        global_cmvn=_init_paraformer_cmvn(configs),
        **configs['paraformer_encoder_conf'])
    logging.info('%s', encoder)
    if configs['paraformer_checkpoint'] is not None:
        kd_utils.load_paraformer_encoder_origin_param(encoder, configs)
    return encoder


def _init_whisper_encoder(configs: dict, input_dim: int):
    """Build the whisper teacher encoder and load its pretrained weights.

    Raises:
        ValueError: if ``configs['whisper_encoder']`` is not 'transformer'
            (the only supported type).
    """
    encoder_type = configs.get('whisper_encoder', 'transformer')
    if encoder_type != 'transformer':
        raise ValueError(
            'not support whisper_encoder_type: %s' % encoder_type)
    encoder = TransformerEncoder(input_dim,
                                 global_cmvn=None,
                                 **configs['whisper_encoder_conf'])
    logging.info('%s', encoder)
    if configs['whisper_checkpoint'] is not None:
        kd_utils.load_whisper_encoder_origin_param(encoder, configs)
    return encoder


def _init_student_encoder(configs: dict, input_dim: int):
    """Build the student encoder according to ``configs['student_encoder']``.

    Any unrecognized type falls back to a plain TransformerEncoder,
    matching the historical behavior of this factory.
    """
    encoder_type = configs.get('student_encoder', 'conformer')
    conf = configs['student_encoder_conf']
    if encoder_type == 'conformer':
        return ConformerEncoder(input_dim, global_cmvn=None, **conf)
    if encoder_type == 'squeezeformer':
        return SqueezeformerEncoder(input_dim, global_cmvn=None, **conf)
    if encoder_type == 'efficientConformer':
        return EfficientConformerEncoder(
            input_dim,
            global_cmvn=None,
            **conf,
            **conf['efficient_conf'] if 'efficient_conf' in conf else {})
    if encoder_type == 'branchformer':
        return BranchformerEncoder(input_dim, global_cmvn=None, **conf)
    if encoder_type == 'e_branchformer':
        return EBranchformerEncoder(input_dim, global_cmvn=None, **conf)
    if encoder_type == 'sanm':
        # SANMEncoder takes no input_dim positional argument.
        return SANMEncoder(global_cmvn=None, **conf)
    return TransformerEncoder(input_dim, global_cmvn=None, **conf)


def init_model(configs: dict):
    """Build the knowledge-distillation (KD) model from ``configs``.

    Constructs two frozen teacher branches (paraformer + whisper), a
    trainable student branch, wraps each in a TeacherStudentModel with its
    own KD_CTC head, and combines them into a single KD_model.

    Args:
        configs: experiment configuration dict. ``input_dim`` is written by
            check_modify_and_save_config and ``output_dim`` by
            init_dataset_and_dataloader before this is called.

    Returns:
        Tuple of ``(kd_model, configs)``.

    Raises:
        ValueError: if an unsupported teacher encoder type is configured.
    """
    input_dim = configs['input_dim']
    vocab_size = configs['output_dim']

    paraformer_encoder = _init_paraformer_encoder(configs)
    whisper_encoder = _init_whisper_encoder(configs, input_dim)
    student_encoder = _init_student_encoder(configs, input_dim)

    # Shared CTC blank id for all three heads (defaults to 0).
    blank_id = (configs['ctc_conf']['ctc_blank_id']
                if 'ctc_conf' in configs else 0)

    paraformer_model = TeacherStudentModel(
        paraformer_encoder,
        KD_CTC(vocab_size, paraformer_encoder.output_size(),
               blank_id=blank_id))
    whisper_model = TeacherStudentModel(
        whisper_encoder,
        KD_CTC(vocab_size, whisper_encoder.output_size(),
               blank_id=blank_id))
    if configs.get('frozen_teacher', True):
        # Teachers only provide distillation targets; exclude them from
        # gradient updates.
        for teacher in (paraformer_model, whisper_model):
            for p in teacher.encoder.parameters():
                p.requires_grad = False
    student_model = TeacherStudentModel(
        student_encoder,
        KD_CTC(vocab_size, student_encoder.output_size(),
               blank_id=blank_id),
        freeze_encoder=False)

    kd_model = KD_model(paraformer_model, whisper_model, student_model)
    logging.info('init kd model success, 参数如下：-------------------------')
    for tag, sub_model in (('paraformer_model', kd_model.paraformer_model),
                           ('whisper_model', kd_model.whisper_model),
                           ('student_model', kd_model.student_model)):
        num_params = sum(p.numel() for p in sub_model.parameters())
        logging.info('{} num_params: {}M'.format(tag, num_params / 1e6))
    return kd_model, configs
