import torch
import numpy as np
import torch.nn as nn
import transformers
import random
from transformers import GPT2Config, GPT2LMHeadModel
from transformers import LlamaConfig, LlamaModel, LlamaForCausalLM, GPT2LMHeadModel, GPT2Config
import torch.nn.functional as F
from tqdm import tqdm
import functools
from .arch import AttentionBlock
from gxl_ai_utils.utils import utils_file

class Acoustic_LLM_wrapper(nn.Module):
    """Autoregressive LM over acoustic (codec) token sequences.

    Wraps a randomly initialised GPT-2 decoder (see :func:`get_LLM_decoder`)
    and trains it with a plain language-modeling objective where the input
    acoustic tokens serve as their own labels.
    """

    # Padding token id; must stay in sync with ``pad_token_id`` in the
    # GPT2Config built by get_LLM_decoder().
    PAD_ID = 4096

    def __init__(self, ):
        super().__init__()
        self.acoustic_LLM = get_LLM_decoder()

    def forward(self, batch, device):
        """Compute the LM loss for one batch.

        Args:
            batch: dict with 'feats' (padded LongTensor of acoustic token ids,
                padded with PAD_ID) and 'feats_lengths' (per-sequence lengths).
            device: torch device the batch tensors are moved to.

        Returns:
            dict with key 'loss' holding a 0-dim scalar loss tensor.
        """
        acoustic_tokens = batch['feats'].to(device)
        utils_file.logging_limit_print(f'acoustic_tokens.shape={acoustic_tokens.shape}')
        acoustic_tokens_lens = batch['feats_lengths'].to(device)
        utils_file.logging_limit_print(f'acoustic_tokens_lens.shape={acoustic_tokens_lens.shape}')
        attention_mask = (acoustic_tokens != self.PAD_ID).long()
        # Fix: HF causal-LM loss only ignores label positions equal to -100.
        # Passing the raw padded tokens as labels would compute cross-entropy
        # over padding positions as well, biasing the loss.
        labels = acoustic_tokens.masked_fill(acoustic_tokens == self.PAD_ID, -100)
        output = self.acoustic_LLM(input_ids=acoustic_tokens, labels=labels,
                                   attention_mask=attention_mask)
        loss = output['loss']  # 0-dim tensor (plain scalar)
        return {"loss": loss}


def get_LLM_decoder(device=None):
    """Build a randomly initialised GPT-2 LM-head model for acoustic tokens.

    Vocabulary layout: ids 0..4095 are codec tokens; ids 4096..4102 are the
    seven special tokens (pad, text BOS/EOS, mel BOS/EOS, global BOS/EOS).

    Args:
        device: optional torch device; when given, the model is moved there.
            (Fix: this parameter was previously accepted but ignored.)

    Returns:
        A ``GPT2LMHeadModel`` configured with ``output_hidden_states=True``.
    """
    # Roughly 10 MB per layer -> ~160 MB for the 16 layers below.
    config = GPT2Config(
        vocab_size=4096 + 7,  # codec ids + 7 special tokens (pad, text/mel/global BOS-EOS)
        max_position_embeddings=100 * 60,  # longest supported sequence length
        n_ctx=100 * 60,  # equivalent to n_positions
        hidden_size=1024,  # mapped by GPT2Config's attribute_map to n_embd
        # NOTE(review): GPT2Config has no ``intermediate_size`` field; this
        # kwarg is stored as a stray attribute and the MLP width silently
        # stays n_inner = 4 * n_embd (= 4096). Kept as-is for checkpoint
        # compatibility — set ``n_inner`` explicitly if 2048 was intended.
        intermediate_size=2048,
        num_hidden_layers=16,  # mapped to n_layer
        num_attention_heads=8,  # mapped to n_head
        activation_function='gelu_new',
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-05,
        initializer_range=0.02,
        pad_token_id=4096,
        bos_token_id_text=4097,
        eos_token_id_text=4098,
        bos_token_id_mel=4099,
        eos_token_id_mel=4100,
        bos_token_id=4101,
        eos_token_id=4102,
        output_hidden_states=True,
    )
    model = GPT2LMHeadModel(config=config)
    # Honor the device argument instead of silently discarding it.
    if device is not None:
        model = model.to(device)
    return model
