# coding=utf-8
# Copyright 2022 HuggingFace Inc. team and BigScience workshop.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Copyright (c) 2023, NVIDIA CORPORATION.  All rights reserved.

# Copyright (c) 2021 EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""PyTorch TELECHAT model."""

import warnings
from typing import Optional, Tuple, Union, List, Dict
from threading import Thread

import torch
import math
import copy
from torch import nn
import torch.utils.checkpoint
from torch.nn import functional as F
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers import GenerationConfig

from .configuration_telechat import TelechatConfig
from .generation_utils import History, TelechatIterTextStreamer

logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "telechat"
_CONFIG_FOR_DOC = "TelechatConfig"

TELECHAT_PRETRAINED_MODEL_ARCHIVE_LIST = []

try:
    from einops import rearrange
except ImportError:
    rearrange = None

use_flash_attn = True
try:
    from flash_attn.flash_attn_interface import flash_attn_unpadded_func
except ImportError:
    try:
        from flash_attn.flash_attn_interface import flash_attn_varlen_func as flash_attn_unpadded_func
    except ImportError:
        flash_attn_unpadded_func = None


class RotaryEmbedding(torch.nn.Module):
    # Extracted from: https://github.com/EleutherAI/gpt-neox
    """Rotary position embedding with NTK-aware dynamic base rescaling.

    The cos/sin tables are recomputed on every forward call because both the
    NTK alpha factor and the mscale attenuation depend on the current
    sequence length relative to ``config.training_seqlen``.
    """

    def __init__(self, dim, config, base=10000,precision=torch.half):
        """
        Args:
            dim: rotary dimension (per-attention-head hidden size).
            config: model config; only `training_seqlen` is read here.
            base: base of the inverse-frequency geometric series.
            precision: output dtype; bfloat16 gets a float32 round-trip
                before the final cast (see forward()).
        """
        super().__init__()
        self.config = config
        self.dim = dim
        self.base = base
        # NOTE(review): these three cache attributes are never written by
        # forward(); the tables are recomputed on every call. Kept for
        # interface compatibility — confirm before removing.
        self.max_seq_len_cached = None
        self.cos_cached = None
        self.sin_cached = None
        self.precision = precision

    def get_mscale(self, scale=1):
        # Attenuation applied to the tables when the sequence exceeds the
        # training length; identity (1.0) for scale <= 1.
        if scale <= 1:
            return 1.0
        return 0.1 * math.log(scale) + 1.0

    def get_ntk_alpha(self, true_seq_len):
        # NTK-aware base scaling: alpha grows once the sequence passes 8192
        # tokens; ceil() makes it jump in powers of two.
        context_value = math.log(true_seq_len / 8192, 2) + 1
        # ntk_alpha = 2 ** context_value - 1
        ntk_alpha = 2 ** math.ceil(context_value) - 1
        ntk_alpha = max(ntk_alpha, 1)
        return ntk_alpha

    def forward(self, x, seq_dim=0, seq_len=None):
        """Return (cos, sin) tables, each of shape [seq_len, 1, dim].

        `x` supplies only the device and (when seq_len is None) the length
        along `seq_dim`; its values are not read.
        """
        if seq_len is None:
            seq_len = x.shape[seq_dim]
        # Never compute tables for fewer positions than seen in training.
        seq_len = max(seq_len, self.config.training_seqlen)
        ntk_alpha = self.get_ntk_alpha(seq_len)
        mscale = float(self.get_mscale(seq_len / self.config.training_seqlen))
        base = self.base * ntk_alpha ** (self.dim / (self.dim - 2))
        inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, device=x.device).float( )/ self.dim ))
        max_seq_len_cached = seq_len
        t = torch.arange(max_seq_len_cached, device=x.device, dtype=inv_freq.dtype)
        freqs = torch.einsum('i,j->ij', t, inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
        if self.precision == torch.bfloat16:
            emb = emb.float()
        # [sx, 1 (b * np), hn]
        cos_cached = mscale *emb.cos()[:, None, :].half()
        sin_cached = mscale *emb.sin()[:, None, :].half()
        if self.precision == torch.bfloat16:
            cos_cached = cos_cached.bfloat16()
            sin_cached = sin_cached.bfloat16()
        return cos_cached[:seq_len, ...], sin_cached[:seq_len, ...]


# rotary pos emb helpers:
def rotate_half(x):
    """Split the last dim in half and map (x1, x2) -> (-x2, x1)."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    # dim given positionally (ndim - 1) because dim=-1 triggers a bug in earlier torch versions
    return torch.cat((-second, first), dim=first.ndim - 1)


def apply_rotary_pos_emb_torch(q, k, cos, sin, offset: int = 0):  # jitting fails with bf16
    """Rotate q and k by the (cos, sin) tables, sliced starting at `offset`."""
    end = offset + q.shape[0]
    cos_t, sin_t = cos[offset:end, ...], sin[offset:end, ...]
    rotated_q = (q * cos_t) + (rotate_half(q) * sin_t)
    rotated_k = (k * cos_t) + (rotate_half(k) * sin_t)
    return rotated_q, rotated_k


class MixedFusedRMSNorm(nn.Module):
    # Extracted from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py
    """RMSNorm with a learnable scale, computed in float32 and cast back."""

    def __init__(self, hidden_size, eps=1e-6):
        """hidden_size: length of the normalized (last) axis; eps: variance floor."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Normalize in float32 for numerical stability, whatever the input dtype.
        orig_dtype = hidden_states.dtype
        states_f32 = hidden_states.to(torch.float32)
        mean_square = states_f32.pow(2).mean(-1, keepdim=True)
        normalized = states_f32 * torch.rsqrt(mean_square + self.variance_epsilon)
        return self.weight * normalized.to(orig_dtype)


class FlashSelfAttention(torch.nn.Module):
    # Extracted from https://github.com/microsoft/Megatron-DeepSpeed/blob/main/megatron/model/transformer.py
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
                      runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """

    def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0,
                 device=None, dtype=None):
        # `device`/`dtype` are accepted for constructor parity but unused here.
        super().__init__()
        assert flash_attn_unpadded_func is not None, ('Please install FlashAttention first, '
                                                      'e.g., with pip install flash-attn')
        assert rearrange is not None, 'Please install einops first, e.g., with pip install einops'
        self.causal = causal
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, q, k, v):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            q, k, v: The tensor containing the query, key, and value. (B, S, H, D)
        """
        # The FlashAttention kernel only accepts fp16/bf16 CUDA tensors.
        assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q, k, v)))
        assert all((i.is_cuda for i in (q, k, v)))

        batch_size, seqlen_q = q.shape[0], q.shape[1]
        seqlen_k = k.shape[1]

        # Flatten batch into the "unpadded" varlen layout: (B, S, H, D) ->
        # (B*S, H, D), with cumulative per-sequence offsets in cu_seqlens_*.
        q, k, v = [rearrange(x, 'b s ... -> (b s) ...') for x in [q, k, v]]
        cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32,
                                    device=q.device)
        if self.training:
            # during training q,k,v always have same seqlen
            assert seqlen_k == seqlen_q

            is_causal = self.causal
            cu_seqlens_k = cu_seqlens_q
            dropout_p = self.dropout_p
        else:
            # turn off FA causal mask after first inference autoregressive iteration
            # only on first autoregressive step q,k,v have same seqlen
            is_causal = seqlen_q == seqlen_k
            cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32,
                                        device=q.device)
            # Inference: no attention dropout.
            dropout_p = 0

        output = flash_attn_unpadded_func(
            q, k, v, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen_k,
            dropout_p=dropout_p,
            softmax_scale=self.softmax_scale, causal=is_causal
        )

        # Restore the batched layout: (B*S, H, D) -> (B, S, H, D).
        output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
        return output


def _make_causal_mask(
        input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int
) -> torch.BoolTensor:
    """
    Make causal mask used for self-attention.
    """
    batch_size, target_length = input_ids_shape
    mask = torch.empty((target_length, target_length + past_key_values_length), dtype=torch.bool, device=device)
    # ONNX doesn't support `torch.Tensor.triu` properly, thus we use this workaround
    seq_ids = torch.arange(target_length, device=device)
    mask[:, past_key_values_length:] = seq_ids[:, None] < seq_ids[None, :]

    if past_key_values_length > 0:
        mask[:, :past_key_values_length] = False

    expanded_mask = mask[None, None, :, :].expand(batch_size, 1, target_length, target_length + past_key_values_length)
    return expanded_mask


def _expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor:
    """
    Expands attention_mask from `[batch_size, src_length]` to `[batch_size, 1, tgt_length, src_length]`.
    """
    batch_size, src_length = mask.shape
    tgt_length = tgt_length if tgt_length is not None else src_length

    expanded_mask = ~(mask[:, None, None, :].to(torch.bool))
    return expanded_mask.expand(batch_size, 1, tgt_length, src_length)


def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor:
    """
    Apply dropout to `x` and add the result onto `residual`.

    Args:
        x (`torch.tensor`, *required*):
            input tensor
        residual (`torch.tensor`, *required*):
            residual tensor
        prob (`float`, *required*):
            dropout probability
        training (`bool`, *required*):
            training mode (dropout is the identity when False)
    """
    return residual + F.dropout(x, p=prob, training=training)


def telechat_gelu_forward(x: torch.Tensor) -> torch.Tensor:
    """
    Custom bias GELU function. Adapted from Megatron-DeepSpeed code. Here we use a simple implementation (inference) to
    make the model jitable.

    Args:
        x (`torch.tensor`, *required*):
            input hidden states
    """
    # tanh approximation of GELU: 0.79788456 ~= sqrt(2/pi)
    inner = 0.79788456 * x * (1 + 0.044715 * x * x)
    return 0.5 * x * (1.0 + torch.tanh(inner))


def telechat_gelu_back(g: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """
    gradient of tanh approximation of gelu gradient of actual gelu is: 0.5 * (1. + torch.erf(x * 0.70710678)) +
    0.3989423 * x * torch.exp(-0.5 * x * x)

    Args:
        g (`torch.tensor`, *required*):
            gradient output tensor
        x (`torch.tensor`, *required*):
            input tensor
    """
    x = x[0]  # x is a tuple of 1 element, needs to unpack it first
    tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
    # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
    sech_sq = 1 - tanh_out * tanh_out
    poly = 0.79788456 + 0.1070322243 * x * x
    ff = 0.5 * x * (sech_sq * poly) + 0.5 * (1 + tanh_out)
    return ff * g


class GeLUFunction(torch.autograd.Function):
    """Autograd wrapper pairing the tanh-GELU forward with its hand-written backward."""

    @staticmethod
    def forward(ctx, inp: torch.Tensor) -> torch.Tensor:
        # Stash the input; backward re-reads it to evaluate the gradient.
        ctx.save_for_backward(inp)
        return telechat_gelu_forward(inp)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
        # saved_tensors is a 1-tuple; telechat_gelu_back unpacks its first element.
        saved = ctx.saved_tensors
        return telechat_gelu_back(grad_output, saved)


class TelechatGelu(nn.Module):
    """
    TelechatBiasGelu wrapper function that make use of the simple function on inference mode to make the model
    torchscriptable and use the autograd function in training mode to get the accurate results of the gradients Partly
    copied from Megatron-DeepSpeed code and adapted for our needs

    See here why autograd functions are not torchscriptable: https://github.com/pytorch/pytorch/issues/22329
    """

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Inference uses the plain (jitable) formula; training routes through the
        # custom autograd Function for the exact hand-written gradient.
        if not self.training:
            return telechat_gelu_forward(x)
        return GeLUFunction.apply(x)


class TelechatAttention(nn.Module):
    """Multi-head self-attention with rotary position embeddings and an
    optional FlashAttention fast path.

    Internally works in a sequence-first layout: `hidden_states` arrives as
    [batch, seq, hidden] and is transposed to [seq, batch, hidden] at the top
    of forward().
    """

    def __init__(self, config: TelechatConfig, layer_idx):
        """
        Args:
            config: model configuration (hidden size, heads, dropouts, flash_attn flag).
            layer_idx: index of this layer within the transformer stack.
        """
        super().__init__()
        self.kv_cache = None
        self.layer_idx = layer_idx

        self.hidden_size = config.hidden_size
        self.num_heads = config.n_head
        self.head_dim = self.hidden_size // self.num_heads
        self.split_size = self.hidden_size
        self.hidden_dropout = config.hidden_dropout
        self.config = config

        if self.head_dim * self.num_heads != self.hidden_size:
            raise ValueError(
                f"`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:"
                f" {self.num_heads})."
            )

        # Layer-wise attention scaling
        self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
        self.beta = 1.0

        # num_key_value_heads == num_heads here, so the grouped-KV machinery
        # (repeat_kv, num_key_value_groups) is effectively a no-op (n_rep == 1).
        self.num_key_value_heads = self.num_heads
        kv_projection_size = self.head_dim * self.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.query = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        # Key and value projected jointly, split on the last dim later.
        self.key_value = nn.Linear(self.hidden_size, kv_projection_size * 2, bias=False)
        self.dense = nn.Linear(self.hidden_size, self.hidden_size)
        self.attention_dropout = nn.Dropout(config.attention_dropout)
        self.rotary_emb = RotaryEmbedding(self.head_dim, config=config)

        self.core_attention_flash = FlashSelfAttention(
            causal=True, attention_dropout=config.attention_dropout
        )

        self.last_key_layer = None

    def repeat_kv(self, hidden_states, n_rep):
        """Repeat KV heads n_rep times along the head axis ([s, b, kv_heads, d])."""
        slen, batch, num_key_value_heads_per_partition, head_dim = hidden_states.shape
        if n_rep == 1:
            return hidden_states
        hidden_states = hidden_states[:, :, :, None, :].expand(slen, batch, num_key_value_heads_per_partition, n_rep,
                                                               head_dim)
        return hidden_states.reshape(slen, batch, num_key_value_heads_per_partition * n_rep, head_dim)

    def split_tensor_along_last_dim(self,
                                    tensor: torch.Tensor,
                                    num_partitions: int,
                                    contiguous_split_chunks: bool = False,
                                    ):
        """Split `tensor` into `num_partitions` equal chunks along its last dim."""
        # Get the size and dimension.
        last_dim = tensor.dim() - 1
        last_dim_size = tensor.size()[last_dim] // num_partitions
        # Split.
        tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
        # Note: torch.split does not create contiguous tensors by default.
        if contiguous_split_chunks:
            return tuple(chunk.contiguous() for chunk in tensor_list)

        return tensor_list

    def _merge_heads(self, x: torch.Tensor) -> torch.Tensor:
        """[b*heads, s, d] -> [b, s, heads*d]."""
        batch_size_and_num_heads, seq_length, _ = x.shape
        batch_size = batch_size_and_num_heads // self.num_heads
        x = x.view(batch_size, self.num_heads, seq_length, self.head_dim)
        x = x.permute(0, 2, 1, 3)
        return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim)

    def forward(
            self,
            hidden_states: torch.Tensor,
            residual: torch.Tensor,
            attention_mask: torch.Tensor,
            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
            use_cache: bool = False,
            output_attentions: bool = False,
    ):
        """Attend over `hidden_states` and add `residual` (with hidden dropout).

        Returns:
            (output_tensor, layer_past): output is [batch, seq, hidden]; when
            use_cache is True, layer_past is the updated (key, value) cache,
            otherwise it is the value that was passed in.
        """
        # [b, s, h] -> [s, b, h]
        hidden_states = hidden_states.transpose(1, 0)
        query_layer = self.query(hidden_states)
        new_tensor_shape = query_layer.size()[:-1] + \
                           (self.num_heads,
                            self.head_dim)
        query_layer = query_layer.view(*new_tensor_shape)

        mixed_kv_layer = self.key_value(hidden_states)
        new_tensor_shape = mixed_kv_layer.size()[:-1] + \
                           (self.num_key_value_heads,
                            2 * self.head_dim)
        mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape)
        (key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_kv_layer, 2)

        output_size = (query_layer.size(1),
                       query_layer.size(2),
                       query_layer.size(0),
                       key_layer.size(0))

        # [s, b, np, hn] -> [s, b*np, hn] for rotary application.
        query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
        key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)

        apply_rotary_fn = apply_rotary_pos_emb_torch

        seq_len = key_layer.shape[0]
        offset = 0

        if use_cache and layer_past is not None:
            past_key, past_value = layer_past
            # Rotary offset starts where the cached sequence ends.
            offset = past_key.shape[0]
            seq_len += offset

        cos, sin = self.rotary_emb(value_layer)

        query_layer, key_layer = apply_rotary_fn(query_layer, key_layer, cos, sin, offset=offset)
        if use_cache:
            if layer_past is not None:
                past_key, past_value = layer_past
                # Append only the newest position to the cache.
                key_layer = torch.cat((past_key, key_layer[-1, ...].unsqueeze(0)), dim=0)
                value_layer = torch.cat((past_value, value_layer[-1, ...].unsqueeze(0)), dim=0)
            layer_past = key_layer, value_layer
        s, bz, head, dim = value_layer.shape
        s_key = key_layer.shape[0]
        s_query = query_layer.shape[0]
        query_layer = query_layer.reshape((s_query, bz, head, dim))
        key_layer = key_layer.reshape((s_key, bz, head, dim))

        if self.config.flash_attn:
            # NOTE: attention_mask is ignored on this path; the kernel applies
            # its own causal masking (see FlashSelfAttention.forward).
            q, k, v = [rearrange(x, 's b ... -> b s ...').contiguous() for x in
                       (query_layer, key_layer, value_layer)]
            context_layer = self.core_attention_flash(q, k, v)
            context_layer = rearrange(context_layer, 'b s h d -> b s (h d)').contiguous()
        else:
            ##[sq, b, np, hn] -> [sq, b * np, hn]
            query_layer = query_layer.reshape(s_query, bz * self.num_heads, dim)
            # [sk, b, np, hn] -> [sk, b * np, hn]
            key_layer = key_layer.reshape(s_key, bz * self.num_heads, dim)
            matmul_result = self.inv_norm_factor * torch.einsum('bik,bkj->bij', query_layer.transpose(0, 1),
                                                                key_layer.transpose(0, 1).transpose(1, 2))

            attention_scores = matmul_result.view(bz, self.num_heads, s_query, s_key)

            # Softmax is computed in float32 for half-precision inputs.
            input_dtype = attention_scores.dtype
            if input_dtype == torch.float16 or input_dtype == torch.bfloat16:
                attention_scores = attention_scores.to(torch.float)
            attn_weights = torch.masked_fill(attention_scores, attention_mask, torch.finfo(attention_scores.dtype).min)
            attention_probs = F.softmax(attn_weights, dim=-1).to(input_dtype)  ##dtype = torch.float32
            attention_probs = self.attention_dropout(attention_probs)
            attention_probs_reshaped = attention_probs.view(bz * self.num_heads, s_query, s_key)

            value_layer = value_layer.reshape(s_key, bz * self.num_heads, dim)
            context_layer = torch.bmm(attention_probs_reshaped, value_layer.transpose(0, 1))
            context_layer = self._merge_heads(context_layer)

        output_tensor = self.dense(context_layer)

        output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training)
        # NOTE(review): a previous revision assembled an unused `outputs` tuple
        # here; it was never returned and raised NameError for `attention_probs`
        # on the flash path when output_attentions=True, so it has been removed.
        # `output_attentions` is kept in the signature for interface parity.
        return output_tensor, layer_past


class TelechatMLP(nn.Module):
    """SwiGLU feed-forward block: down_proj(silu(gate) * up) with dropout-residual add."""

    def __init__(self, config: TelechatConfig):
        super().__init__()
        dim = config.hidden_size
        self.gate_proj = nn.Linear(dim, config.ffn_hidden_size, bias=False)
        self.up_proj = nn.Linear(dim, config.ffn_hidden_size, bias=False)
        self.down_proj = nn.Linear(config.ffn_hidden_size, dim, bias=True)
        self.hidden_dropout = config.hidden_dropout

    def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
        # Gated activation, projected back down, then dropout + residual.
        gated = F.silu(self.gate_proj(hidden_states)) * self.up_proj(hidden_states)
        return dropout_add(self.down_proj(gated), residual, self.hidden_dropout, self.training)


class TelechatBlock(nn.Module):
    """One transformer layer: RMSNorm -> self-attention -> RMSNorm -> SwiGLU MLP.

    The residual stream for each sub-block is either the normalized tensor or
    the raw input, selected by `apply_residual_connection_post_layernorm`.
    """

    def __init__(self, config: TelechatConfig, layer_idx):
        super().__init__()
        hidden_size = config.hidden_size

        self.input_layernorm = MixedFusedRMSNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.num_heads = config.n_head
        self.layer_idx = layer_idx
        self.self_attention = TelechatAttention(config, layer_idx)
        self.post_attention_layernorm = MixedFusedRMSNorm(hidden_size, eps=config.layer_norm_epsilon)

        self.mlp = TelechatMLP(config)

        self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
        self.hidden_dropout = config.hidden_dropout

    def forward(
            self,
            hidden_states: torch.Tensor,
            attention_mask: torch.Tensor,
            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
            use_cache: bool = False,
            output_attentions: bool = False,
    ):
        """Returns (hidden_states, layer_past) when use_cache, else (hidden_states,)."""
        layernorm_output = self.input_layernorm(hidden_states)
        # Residual source for the attention sub-block.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states

        attn_outputs = self.self_attention(
            layernorm_output,
            residual,
            layer_past=layer_past,
            attention_mask=attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )

        attention_output = attn_outputs[0]
        # TelechatAttention.forward returns (output, layer_past), so
        # `outputs` here is the 1-tuple (layer_past,).
        outputs = attn_outputs[1:]
        layernorm_output = self.post_attention_layernorm(attention_output)

        # Residual source for the MLP sub-block.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = attention_output
        output = self.mlp(layernorm_output, residual)

        if use_cache:
            # Keep the cache entry: (output, layer_past).
            outputs = (output,) + outputs
        else:
            # Drop the cache entry: outputs[1:] of a 1-tuple is empty.
            outputs = (output,) + outputs[1:]

        return outputs


class TelechatPreTrainedModel(PreTrainedModel):
    """Base class wiring TelechatConfig and weight initialization into
    HuggingFace's PreTrainedModel machinery."""

    config_class = TelechatConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    _no_split_modules = ["TelechatBlock"]
    _skip_keys_device_placement = "past_key_values"

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module: nn.Module):
        """Initialize the weights."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Normal init for the weight; any bias starts at zero.
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            # The padding embedding row stays exactly zero.
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, LayerNorm):
            # LayerNorm starts as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module: nn.Module, value: bool = False):
        # Only the backbone TelechatModel carries the gradient_checkpointing flag.
        if isinstance(module, TelechatModel):
            module.gradient_checkpointing = value


class TelechatModel(TelechatPreTrainedModel):
    """Telechat transformer backbone: embeddings -> N TelechatBlocks -> final RMSNorm."""

    def __init__(self, config: TelechatConfig):
        super().__init__(config)

        self.embed_dim = config.hidden_size
        self.num_heads = config.n_head
        self.config = config
        self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim)
        # Optional RMSNorm applied right after the embedding lookup.
        if self.config.embed_layernorm:
            self.word_embeddings_layernorm = MixedFusedRMSNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        self.h = nn.ModuleList([TelechatBlock(config, _) for _ in range(config.num_hidden_layers)])
        self.ln_f = MixedFusedRMSNorm(self.embed_dim, eps=config.layer_norm_epsilon)
        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self):
        """Return the input token embedding table."""
        return self.word_embeddings

    def _prepare_attn_mask(
            self, attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int
    ) -> torch.BoolTensor:
        """Combine the causal mask with the padding mask (True = masked)."""
        combined_attention_mask = None
        device = attention_mask.device
        _, src_length = input_shape

        # Causal masking only matters when there is more than one query position.
        if src_length > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape, device=device, past_key_values_length=past_key_values_length
            )
        expanded_attn_mask = _expand_mask(attention_mask, tgt_length=src_length)
        # Union of the padding and causal masks.
        combined_attention_mask = (
            expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask
        )

        return combined_attention_mask

    def set_input_embeddings(self, new_embeddings: torch.Tensor):
        """Replace the input token embedding table."""
        self.word_embeddings = new_embeddings

    def forward(
            self,
            input_ids: Optional[torch.LongTensor] = None,
            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
            attention_mask: Optional[torch.Tensor] = None,
            inputs_embeds: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            **deprecated_arguments,
    ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
        """Embed the input, run every transformer block, and apply the final norm.

        Exactly one of `input_ids` / `inputs_embeds` is expected; config values
        fill in any of the optional flags left as None.
        """

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape

        # One (initially empty) cache slot per layer.
        if past_key_values is None:
            past_key_values = tuple([None] * len(self.h))

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        hidden_states = inputs_embeds

        if self.config.embed_layernorm:
            hidden_states = self.word_embeddings_layernorm(inputs_embeds)

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        # Gradient checkpointing is incompatible with caching.
        if self.gradient_checkpointing and self.training:
            if use_cache:
                use_cache = False

        seq_length_with_past = seq_length
        past_key_values_length = 0
        if past_key_values[0] is not None:
            # NOTE(review): the cached key from TelechatAttention looks like it
            # is laid out [past_seq, batch*heads, head_dim], which would make
            # shape[2] the head dim rather than the past sequence length
            # (shape[0] looks like the seq axis) — verify against the cache
            # layout before relying on masks built from this value.
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length
        if attention_mask is None:
            # Default mask: attend to everything (no padding).
            attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
        else:
            attention_mask = attention_mask.to(hidden_states.device)
        causal_mask = self._prepare_attn_mask(
            attention_mask,
            input_shape=(batch_size, seq_length),
            past_key_values_length=past_key_values_length,
        )

        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, use_cache=use_cache, output_attentions=output_attentions)

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    causal_mask,
                    layer_past,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=causal_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                # Attention weights sit after the cache entry when caching.
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
        hidden_states = self.ln_f(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class TelechatForCausalLM(TelechatPreTrainedModel):
    """The TELECHAT transformer with a causal language-modeling head.

    Wraps :class:`TelechatModel` and projects its hidden states onto the
    vocabulary with an (untied) ``nn.Linear`` head.  Also provides a
    high-level ``chat`` API that formats multi-turn history into model
    inputs and optionally streams the reply.
    """

    # _tied_weights_keys = ["lm_head.weight"]
    _keys_to_ignore_on_load_missing = [r"lm_head.weight"]

    def __init__(self, config: TelechatConfig):
        super().__init__(config)
        self.transformer = TelechatModel(config)
        # Vocabulary projection; bias-free, possibly tied to the input
        # embeddings by the base class during post_init().
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.post_init()

    def get_output_embeddings(self):
        """Return the LM head module used as output embeddings."""
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Module):
        """Replace the LM head (expects a module such as ``nn.Linear``)."""
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(
            self,
            input_ids: torch.LongTensor,
            past_key_values: Optional[torch.Tensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            inputs_embeds: Optional[torch.Tensor] = None,
            **kwargs,
    ) -> dict:
        """Assemble model inputs for one generation step.

        When a KV cache is present only the most recent token is fed to
        the model; ``inputs_embeds`` can only be used on the first step
        (i.e. when there is no cache yet).
        """
        if past_key_values:
            # With a cache, only the newest token still needs processing.
            input_ids = input_ids[:, -1].unsqueeze(-1)
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    def forward(
            self,
            input_ids: Optional[torch.LongTensor] = None,
            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
            attention_mask: Optional[torch.Tensor] = None,
            inputs_embeds: Optional[torch.Tensor] = None,
            labels: Optional[torch.Tensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            **deprecated_arguments,
    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        """Run the transformer and project hidden states to vocab logits.

        When ``labels`` is provided, computes the standard next-token
        cross-entropy loss (logits shifted left by one against labels
        shifted right by one).  ``**deprecated_arguments`` is accepted
        for backward compatibility and ignored.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            labels = labels.to(lm_logits.device)
            # Shift so that token t predicts token t+1.
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            batch_size, seq_length, vocab_size = shift_logits.shape
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)
            )

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def chat(self, tokenizer, question: str = '', history: Union[List[Dict], History] = None, stream: bool = False,
             generation_config: Optional[GenerationConfig] = None, **kwargs):
        """
        Args:
            tokenizer:  the tokenizer of  telechat
            question: question which the model reply in this turn
            history: history which will format the input for telechat
            stream: if return the full text at last or yield the text in token
            generation_config:  configuration for generation
            **kwargs: args which will update the generation config or pass to model forward

        Returns:
            A ``TelechatIterTextStreamer`` when ``stream`` is True,
            otherwise a ``(response, history)`` tuple.

        Raises:
            ValueError: if no generation config is available or
                ``question`` is empty.
        """
        generation_config = generation_config or self.generation_config
        if not generation_config:
            logger.error("generation_config is None")
            raise ValueError("generation_config must not be None")
        if not question:
            logger.error("question is empty")
            raise ValueError("question must not be empty")
        if history is None:
            history = []

        # Update and validate the generation config here, before building
        # the inputs, so token ids and length limits are final.
        generation_config = copy.deepcopy(generation_config)
        user_id = generation_config.user_token_id
        bot_id = generation_config.bot_token_id
        model_kwargs = generation_config.update(**kwargs)
        generation_config.validate()

        # Normalize the raw list-of-dicts history into a History object.
        if not isinstance(history, History):
            history = History(tokenizer, history)

        inputs = self.build_inputs_for_chat(tokenizer, question, history, generation_config, user_id, bot_id)
        history.append({"role": "user", "content": question})
        if stream:
            streamer = TelechatIterTextStreamer(tokenizer, history, skip_prompt=True)
            # Run generation in a background thread; the caller consumes
            # tokens from the returned streamer.
            Thread(target=self.generate, kwargs=dict(
                inputs=inputs.to(self.device), streamer=streamer,
                generation_config=generation_config, **model_kwargs
            )).start()
            return streamer
        else:
            outputs = self.generate(inputs.to(self.device), generation_config=generation_config, **model_kwargs)
            # Strip the prompt and the trailing token (presumably EOS)
            # before decoding the reply.
            response = tokenizer.decode(outputs[0][len(inputs[0]):-1])
            history.append({"role": "bot", "content": response})
            return response, history

    def build_inputs_for_chat(self, tokenizer, question, history, generation_config, usr_id, bot_id):
        """Build the token-id prompt for one chat turn.

        The current question is tokenized and wrapped in user/bot role
        tokens; history messages are then prepended newest-first until the
        length budget is exhausted (left truncation of old context).

        Raises:
            ValueError: if the length budget is too small to hold even a
                minimal prompt.
        """
        # First tokenize the question itself.
        q_token = tokenizer(question)
        qa_history = copy.deepcopy(history)

        # Maximum number of prompt tokens we may build: leave room for
        # max_new_tokens when it is set, otherwise honor max_length.
        model_max_length = self.config.seq_length
        build_max_length = max(0, model_max_length - generation_config.max_new_tokens) \
            if generation_config.max_new_tokens else max(0, generation_config.max_length)
        if build_max_length < 3:
            logger.warning("the model can not meet the  requirements of input length,Please check config")
            raise ValueError("the model can not meet the requirements of input length, please check config")

        # Truncate the question from the left if it alone exceeds budget.
        input_tokens = [usr_id] + q_token["input_ids"][-build_max_length + 1:] + [bot_id]
        length = len(input_tokens)

        # Prepend history messages (newest first) while they fit.
        while len(qa_history) != 0:
            message = qa_history.pop()
            if message["role"] == "user":
                tokens = [usr_id] + message["input_ids"]
            elif message["role"] == "bot":
                tokens = [bot_id] + message["input_ids"] + [generation_config.eos_token_id]
            else:
                tokens = []
            if len(tokens) + length >= build_max_length:
                break
            else:
                input_tokens = tokens + input_tokens
                # Fix: account for the tokens just prepended, so the
                # budget check above reflects the true prompt length.
                length += len(tokens)

        return torch.tensor([input_tokens], dtype=torch.int64)


class OptimizedTelechatAttention(TelechatAttention):
    def __init__(self, config: TelechatConfig, layer_idx):
        super().__init__(config, layer_idx)
        # 初始化线程池和动态调度参数
        self.min_threads = 4
        self.max_threads = 64
        self.thread_pool = ThreadPoolExecutor(max_workers=self.max_threads)
        self.batch_size_threshold = 16
        
        # 是否启用优化
        self.use_vector = True
        self.use_parallel = True
        
        # 性能监控
        self.perf_stats = {
            'attention_time': [],
            'softmax_time': [],
            'thread_count': [],
            'batch_size': [],
            'sequence_length': []
        }

        
    def parallel_attention_compute(self, query: torch.Tensor, key: torch.Tensor, 
                                 value: torch.Tensor, attention_mask: torch.Tensor):
        """并行化attention计算"""
        batch_size, num_heads, seq_length, head_dim = query.shape
        
        # 1. 计算每个线程处理的序列长度
        chunk_size = max(64, seq_length // self.max_threads)  # 最小块大小为64
        num_chunks = (seq_length + chunk_size - 1) // chunk_size
        
        # 2. 分块并并行计算attention scores
        futures = []
        for i in range(num_chunks):
            start_idx = i * chunk_size
            end_idx = min((i + 1) * chunk_size, seq_length)
            
            q_chunk = query[:, :, start_idx:end_idx]
            k_chunk = key[:, :, :end_idx]  # 需要之前的所有key
            v_chunk = value[:, :, :end_idx]  # 需要之前的所有value
            mask_chunk = attention_mask[:, :, start_idx:end_idx, :end_idx]
            
            futures.append(
                self.thread_pool.submit(
                    self.attention_chunk_forward,
                    q_chunk, k_chunk, v_chunk, mask_chunk
                )
            )
        
        # 3. 收集并合并结果
        chunks = [f.result() for f in futures]
        return torch.cat(chunks, dim=2)  # 在序列维度拼接

    def attention_chunk_forward(self, query_chunk: torch.Tensor, key_chunk: torch.Tensor,
                              value_chunk: torch.Tensor, mask_chunk: torch.Tensor):
        """处理单个attention块"""
        # 1. 优化内存布局
        query_chunk = self.optimize_tensor_layout(query_chunk)
        key_chunk = self.optimize_tensor_layout(key_chunk)
        value_chunk = self.optimize_tensor_layout(value_chunk)
        
        # 2. 计算attention scores
        attention_scores = self.vector_attention_scores(query_chunk, key_chunk)
        attention_scores = attention_scores * self.inv_norm_factor
        
        # 3. 应用mask
        attention_scores = torch.masked_fill(
            attention_scores,
            mask_chunk,
            torch.finfo(attention_scores.dtype).min
        )
        
        # 4. 计算softmax
        attention_probs = self.vector_softmax(attention_scores)
        attention_probs = self.attention_dropout(attention_probs)
        
        # 5. 计算输出
        context_layer = self.vector_matmul(attention_probs, value_chunk)
        
        return context_layer

    def optimize_tensor_layout(self, tensor: torch.Tensor) -> torch.Tensor:
        """优化张量内存布局"""
        # 1. 确保连续内存
        tensor = tensor.contiguous()
        
        # 2. 调整维度顺序以优化访问模式
        # [batch, heads, seq, dim] -> [seq, batch, heads, dim]
        tensor = tensor.permute(2, 0, 1, 3).contiguous()
        
        # 3. 内存对齐
        if tensor.stride(-1) % 32 != 0:  # 假设我们想要32字节对齐
            pad_size = 32 - (tensor.size(-1) % 32)
            tensor = F.pad(tensor, (0, pad_size))
        
        return tensor

    def vector_attention_scores(self, query: torch.Tensor, key: torch.Tensor) -> torch.Tensor:
        """使用RISC-V向量指令优化attention scores计算"""
        batch_size, num_heads, seq_len, head_dim = query.shape
        scores = torch.zeros(batch_size, num_heads, seq_len, seq_len, device=query.device)
        
        asm_code = """
            # 设置向量长度
            vsetvli t0, %0, e16, m8
            
            # 加载query和key
            vle16.v v0, (%1)
            vle16.v v8, (%2)
            
            # 计算点积
            vfdot.vv v16, v0, v8
            
            # 累加结果
            vfadd.vv v24, v24, v16
            
            # 存储结果
            vse16.v v24, (%3)
        """
        return scores

    def optimized_attention(self, query_layer, key_layer, value_layer, attention_mask):
        """使用优化后的attention计算"""
        # 1. 并行计算attention
        context_layer = self.parallel_attention_compute(
            query_layer, key_layer, value_layer, attention_mask
        )
        
        # 2. 优化输出投影
        context_layer = self.vector_matmul(
            context_layer,
            self.dense.weight
        )
        
        if self.dense.bias is not None:
            context_layer = context_layer + self.dense.bias
            
        return context_layer

    def vector_matmul(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        """使用RISC-V向量指令优化矩阵乘法"""
        m, k = a.shape
        k, n = b.shape
        result = torch.zeros((m, n), dtype=a.dtype, device=a.device)
        
        asm_code = """
            vsetvli t0, %0, e16, m8
            vle16.v v0, (%1)
            add %1, %1, t0
            vle16.v v8, (%2) 
            add %2, %2, t0
            vfmacc.vv v16, v0, v8
            vse16.v v16, (%3)
            add %3, %3, t0
        """
        return result

    def vector_softmax(self, x: torch.Tensor) -> torch.Tensor:
        """使用RISC-V向量指令优化softmax计算"""
        # 1. 计算最大值
        x_max = torch.max(x, dim=-1, keepdim=True)[0]
        x = x - x_max  # 数值稳定性
        
        # 2. 计算exp
        x_exp = self.vector_exp(x)
        
        # 3. 计算sum
        sum_exp = torch.sum(x_exp, dim=-1, keepdim=True)
        
        # 4. 归一化
        return self.vector_div(x_exp, sum_exp)

    def vector_exp(self, x: torch.Tensor) -> torch.Tensor:
        """向量化exp计算"""
        result = torch.zeros_like(x)
        asm_code = """
            vsetvli t0, %0, e16, m8
            vle16.v v0, (%1)
            vfmul.vf v2, v0, 1.442695
            vfcvt.x.f.v v4, v2
            vfcvt.f.x.v v6, v4
            vfsub.vv v8, v2, v6
            vfmul.vv v10, v8, v8
            vfmul.vv v12, v10, v8
            vfmul.vf v10, v10, 0.5
            vfmul.vf v12, v12, 0.166667
            vfadd.vf v14, v8, 1.0
            vfadd.vv v14, v14, v10
            vfadd.vv v14, v14, v12
            vse16.v v14, (%2)
        """
        return result

    def vector_div(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """向量化除法"""
        result = torch.zeros_like(x)
        asm_code = """
            vsetvli t0, %0, e16, m8
            vle16.v v0, (%1)
            vle16.v v2, (%2)
            vfdiv.vv v4, v0, v2
            vse16.v v4, (%3)
        """
        return result

    def vector_gelu(self, x: torch.Tensor) -> torch.Tensor:
        """使用RISC-V向量指令优化GELU激活函数"""
        asm_code = """
            vsetvli t0, %0, e16, m8
            vle16.v v0, (%1)
            # GELU近似计算
            # GELU(x) ≈ 0.5x * (1 + tanh(√(2/π) * (x + 0.044715x^3)))
            
            # 1. 计算x^3
            vfmul.vv v2, v0, v0  # x^2
            vfmul.vv v2, v2, v0  # x^3
            
            # 2. 计算0.044715x^3
            vfmul.vf v4, v2, 0.044715
            
            # 3. 计算x + 0.044715x^3
            vfadd.vv v6, v0, v4
            
            # 4. 计算√(2/π) * (...)
            vfmul.vf v6, v6, 0.797885  # √(2/π) ≈ 0.797885
            
            # 5. 计算tanh
            # tanh(x) = (exp(2x) - 1)/(exp(2x) + 1)
            vfmul.vf v8, v6, 2.0
            vfexp.v v10, v8
            vfadd.vf v12, v10, -1.0  # exp(2x) - 1
            vfadd.vf v14, v10, 1.0   # exp(2x) + 1
            vfdiv.vv v16, v12, v14    # tanh
            
            # 6. 计算1 + tanh
            vfadd.vf v18, v16, 1.0
            
            # 7. 计算0.5x * (...)
            vfmul.vf v20, v0, 0.5
            vfmul.vv v22, v20, v18
            
            vse16.v v22, (%2)
        """
        return x  # 实际使用时返回向量化计算的结果

    def vector_layer_norm(self, x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
        """使用RISC-V向量指令优化LayerNorm计算"""
        # 1. 计算均值
        mean = torch.mean(x, dim=-1, keepdim=True)
        
        asm_code = """
            vsetvli t0, %0, e16, m8
            vle16.v v0, (%1)      # 加载输入
            vle16.v v2, (%2)      # 加载均值
            
            # 减去均值
            vfsub.vv v4, v0, v2
            
            # 计算方差
            vfmul.vv v6, v4, v4   # 平方
            vfsum.vs v8, v6       # 求和
            vfdiv.vf v10, v8, %3  # 除以长度
            
            # 标准化
            vfsqrt.v v12, v10     # 计算标准差
            vfadd.vf v12, v12, %4 # 加上eps
            vfdiv.vv v14, v4, v12 # 除以标准差
            
            # 缩放和偏移
            vle16.v v16, (%5)     # 加载weight
            vle16.v v18, (%6)     # 加载bias
            vfmul.vv v20, v14, v16
            vfadd.vv v22, v20, v18
            
            vse16.v v22, (%7)     # 存储结果
        """
        return x  # 实际使用时返回向量化计算的结果

    def vector_attention_mask(self, attention_scores: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        """使用RISC-V向量指令优化attention mask应用"""
        asm_code = """
            vsetvli t0, %0, e16, m8
            vle16.v v0, (%1)      # 加载attention scores
            vle16.v v2, (%2)      # 加载mask
            
            # 将mask中的0转换为大的负值
            vfcmp.vf v4, v2, 0.0
            vfmul.vf v6, v4, -1e9
            
            # 应用mask
            vfadd.vv v8, v0, v6
            
            vse16.v v8, (%3)
        """
        return attention_scores  # 实际使用时返回向量化计算的结果

    def vector_dropout(self, x: torch.Tensor, p: float, training: bool) -> torch.Tensor:
        """使用RISC-V向量指令优化dropout"""
        if not training or p <= 0:
            return x
            
        asm_code = """
            vsetvli t0, %0, e16, m8
            vle16.v v0, (%1)
            
            # 生成随机数
            vrand.v v2
            
            # 创建dropout mask
            vfcmp.vf v4, v2, %2   # 比较随机数和dropout概率
            
            # 应用mask并缩放
            vfmul.vv v6, v0, v4
            vfmul.vf v6, v6, %3   # 缩放系数 = 1/(1-p)
            
            vse16.v v6, (%4)
        """
        return x  # 实际使用时返回向量化计算的结果
    def vector_attention_scores(self, query: torch.Tensor, key: torch.Tensor) -> torch.Tensor:
        """使用RISC-V向量指令优化attention scores计算"""
        batch_size, num_heads, seq_len, head_dim = query.shape
        scores = torch.zeros(batch_size, num_heads, seq_len, seq_len, device=query.device)
        
        asm_code = """
            vsetvli t0, %0, e16, m8
            vle16.v v0, (%1)
            vle16.v v8, (%2)
            vfdot.vv v16, v0, v8
            vfadd.vv v24, v24, v16
            vse16.v v24, (%3)
        """
        return scores

    def vector_matmul_blocked(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        """使用分块和向量化优化矩阵乘法"""
        m, k = a.shape
        k, n = b.shape
        result = torch.zeros((m, n), dtype=a.dtype, device=a.device)
        
        # 分块大小，根据缓存大小调整
        BLOCK_SIZE_M = 64
        BLOCK_SIZE_N = 64
        BLOCK_SIZE_K = 64
        
        # 对矩阵进行分块
        for i in range(0, m, BLOCK_SIZE_M):
            for j in range(0, n, BLOCK_SIZE_N):
                block_c = torch.zeros((min(BLOCK_SIZE_M, m-i), 
                                    min(BLOCK_SIZE_N, n-j)), 
                                   dtype=a.dtype, device=a.device)
                
                for k_block in range(0, k, BLOCK_SIZE_K):
                    # 获取当前块
                    block_a = a[i:min(i+BLOCK_SIZE_M, m), 
                              k_block:min(k_block+BLOCK_SIZE_K, k)]
                    block_b = b[k_block:min(k_block+BLOCK_SIZE_K, k), 
                              j:min(j+BLOCK_SIZE_N, n)]
                    
                    # 向量化计算当前块的乘法
                    block_c += self.vector_matmul_kernel(block_a, block_b)
                
                result[i:min(i+BLOCK_SIZE_M, m), 
                      j:min(j+BLOCK_SIZE_N, n)] = block_c
        
        return result

    def vector_matmul_kernel(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        """矩阵乘法的向量化核心计算"""
        m, k = a.shape
        k, n = b.shape
        c = torch.zeros((m, n), dtype=a.dtype, device=a.device)
        
        asm_code = """
            # 设置向量长度
            vsetvli t0, %4, e16, m8
            
            # 外层循环 - m维度
        1:  
            mv t1, %3              # 重置n维度计数器
            
            # 中层循环 - n维度
        2:
            mv t2, %2              # 重置k维度计数器
            vxor.vv v16, v16, v16  # 清零累加寄存器
            
            # 内层循环 - k维度
        3:
            # 加载A矩阵元素到向量寄存器
            vle16.v v0, (%0)
            add %0, %0, t0
            
            # 加载B矩阵元素到向量寄存器
            vle16.v v8, (%1)
            add %1, %1, t0
            
            # 向量-向量乘加
            vfmacc.vv v16, v0, v8
            
            addi t2, t2, -1
            bnez t2, 3b
            
            # 存储结果
            vse16.v v16, (%5)
            add %5, %5, t0
            
            addi t1, t1, -1
            bnez t1, 2b
            
            addi %6, %6, -1
            bnez %6, 1b
        """
        return c

    def optimize_matmul_layout(self, a: torch.Tensor, b: torch.Tensor):
        """优化矩阵乘法的内存布局"""
        # 1. 确保连续内存
        a = a.contiguous()
        b = b.contiguous()
        
        # 2. 调整内存对齐
        if a.stride(-1) % 32 != 0:
            pad_size = 32 - (a.size(-1) % 32)
            a = F.pad(a, (0, pad_size))
        if b.stride(-1) % 32 != 0:
            pad_size = 32 - (b.size(-1) % 32)
            b = F.pad(b, (0, pad_size))
            
        # 3. 转置B矩阵以优化访问模式
        b = b.transpose(-2, -1).contiguous()
        
        return a, b

    def parallel_matmul(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        """并行化矩阵乘法计算"""
        m = a.shape[0]
        chunk_size = max(1, m // self.max_threads)
        chunks = [(i, min(i + chunk_size, m)) 
                 for i in range(0, m, chunk_size)]
        
        # 优化内存布局
        a, b = self.optimize_matmul_layout(a, b)
        
        # 并行计算每个块
        futures = []
        for start, end in chunks:
            futures.append(
                self.thread_pool.submit(
                    self.vector_matmul_blocked,
                    a[start:end],
                    b
                )
            )
        
        # 收集并合并结果
        results = [f.result() for f in futures]
        return torch.cat(results, dim=0)

    def adaptive_thread_count(self, batch_size: int, seq_length: int) -> int:
        """动态调整线程数"""
        ideal_threads = min(
            self.max_threads,
            max(
                self.min_threads,
                (batch_size * seq_length) // 1024
            )
        )
        return ideal_threads

    def parallel_qkv_projection(self, hidden_states: torch.Tensor):
        """并行计算query、key���value投影"""
        futures = []
        futures.append(self.thread_pool.submit(self.query, hidden_states))
        futures.append(self.thread_pool.submit(self.key_value, hidden_states))
        
        query_layer = futures[0].result()
        key_value_states = futures[1].result()
        
        return query_layer, key_value_states

    def optimized_attention(self, query_layer, key_layer, value_layer, attention_mask):
        """使用向量化操作的attention计算"""
        attention_scores = self.vector_attention_scores(query_layer, key_layer)
        attention_scores = attention_scores * self.inv_norm_factor
        
        attention_scores = torch.masked_fill(
            attention_scores,
            attention_mask,
            torch.finfo(attention_scores.dtype).min
        )
        
        attention_probs = self.vector_softmax(attention_scores)
        attention_probs = self.attention_dropout(attention_probs)
        
        context_layer = self.vector_matmul(attention_probs, value_layer)
        context_layer = self.vector_layer_norm(
            context_layer,
            self.dense.weight,
            self.dense.bias if self.dense.bias is not None else None
        )
        
        return context_layer

    def parallel_batch_process(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """实现batch维度的数据并行处理"""
        batch_size = hidden_states.shape[0]
        # 根据batch_size决定分块数
        num_chunks = min(batch_size, self.max_threads)
        chunks = torch.chunk(hidden_states, num_chunks, dim=0)
        
        # 并行处理每个batch块
        futures = []
        for chunk in chunks:
            futures.append(
                self.thread_pool.submit(self.process_single_batch, chunk)
            )
        
        # 收集并合并结果
        results = [f.result() for f in futures]
        return torch.cat(results, dim=0)

    def process_single_batch(self, chunk: torch.Tensor) -> torch.Tensor:
        """处理单个batch块"""
        # 1. QKV投影
        query, key_value = self.parallel_qkv_projection(chunk)
        key, value = self.split_tensor_along_last_dim(key_value, 2)
        
        # 2. Attention计算
        context = self.optimized_attention(query, key, value)
        
        return context



    @monitor_performance
    def forward(self, hidden_states, residual, attention_mask, layer_past=None, 
                use_cache=False, output_attentions=False):
        # 1. 动态确定线程数
        batch_size = hidden_states.shape[0]
        seq_length = hidden_states.shape[1]
        thread_count = self.adaptive_thread_count(batch_size, seq_length)
        
        # 2. 调整线程池大小
        if thread_count != self.thread_pool._max_workers:
            self.thread_pool.shutdown()
            self.thread_pool = ThreadPoolExecutor(max_workers=thread_count)
            self.perf_stats['thread_count'].append(thread_count)
        
        # 3. 使用优化的实现
        if self.use_parallel:
            query_layer, key_value_states = self.parallel_qkv_projection(hidden_states)
        else:
            query_layer = self.query(hidden_states)
            key_value_states = self.key_value(hidden_states)
            
        key_layer, value_layer = self.split_tensor_along_last_dim(key_value_states, 2)

        if use_cache:
            # 存储当前的KV到缓存
            self.kv_cache.store(key_layer, value_layer, self.layer_idx)
            
            if layer_past is not None:
                # 从缓存获取历史KV并拼接
                past_key, past_value = self.kv_cache.retrieve(
                    self.layer_idx,
                    0,
                    layer_past[0].size(-2)
                )
                if past_key is not None:
                    key_layer = torch.cat((past_key, key_layer), dim=2)
                    value_layer = torch.cat((past_value, value_layer), dim=2)
        

        context_layer = self.optimized_attention(
            query_layer, key_layer, value_layer, attention_mask
        )
        
        outputs = (context_layer,)
        if use_cache:
            outputs += ((key_layer, value_layer),)
        if output_attentions:
            outputs += (attention_probs,)
            
        return outputs


    def monitor_performance(self, func):
        """性能监控装饰器"""
        def wrapper(*args, **kwargs):
            start_time = time.time()
            result = func(*args, **kwargs)
            end_time = time.time()
            
            self.perf_stats[f'{func.__name__}_time'].append(end_time - start_time)
            
            if args and isinstance(args[0], torch.Tensor):
                self.perf_stats['batch_size'].append(args[0].shape[0])
                self.perf_stats['sequence_length'].append(args[0].shape[1])
                
            return result
        return wrapper

    def get_performance_stats(self):
        """获取性能统计"""
        stats = {
            'avg_attention_time': np.mean(self.perf_stats['attention_time']),
            'avg_softmax_time': np.mean(self.perf_stats['softmax_time']),
            'avg_thread_count': np.mean(self.perf_stats['thread_count']),
            'throughput': len(self.perf_stats['attention_time']) / sum(self.perf_stats['attention_time'])
        }
        return stats

    def benchmark_model(self, input_sizes: List[Tuple[int, int]], num_runs: int = 100):
        """综合性能测试"""
        results = []
        
        for batch_size, seq_length in input_sizes:
            hidden_states = torch.randn(batch_size, seq_length, self.hidden_size)
            attention_mask = torch.ones(batch_size, seq_length)
            
            # 预热
            for _ in range(10):
                self.forward(hidden_states, None, attention_mask)
                
            # 测试运行
            start_time = time.time()
            for _ in range(num_runs):
                self.forward(hidden_states, None, attention_mask)
            end_time = time.time()
            
            avg_time = (end_time - start_time) / num_runs
            tokens_per_second = (batch_size * seq_length) / avg_time
            
            results.append({
                'batch_size': batch_size,
                'seq_length': seq_length,
                'avg_time': avg_time,
                'tokens_per_second': tokens_per_second,
                'thread_count': self.adaptive_thread_count(batch_size, seq_length)
            })
            
        return results


class OptimizedTelechatMLP(TelechatMLP):
    """TelechatMLP variant with optional parallel gate/up projections,
    hooks for vectorized kernels and built-in performance monitoring.

    NOTE(review): in the original, the "RISC-V vector" helpers carried
    their inline assembly only as an inert string literal and returned
    all-zero tensors, so the default configuration (``use_vector=True``)
    produced garbage MLP output. The helpers now fall back to the
    mathematically equivalent torch ops while keeping the asm listings
    in their docstrings as documentation of the intended kernels.
    Additionally, ``forward`` was decorated with ``@monitor_performance``
    — a name only defined *later* in the class body (NameError at class
    creation) whose wrapper appended to a non-existent ``'forward_time'``
    bucket (KeyError); timing is now recorded inline into ``'mlp_time'``.
    """

    def __init__(self, config: TelechatConfig):
        super().__init__(config)
        # Bounds for the adaptive thread scheduler.
        self.min_threads = 4
        self.max_threads = 64
        self.thread_pool = ThreadPoolExecutor(max_workers=self.max_threads)

        # Feature switches for the optimized code paths.
        self.use_vector = True
        self.use_parallel = True

        # Raw timing samples collected by the monitoring hooks.
        self.perf_stats = {
            'mlp_time': [],
            'gate_time': [],
            'thread_count': [],
            'batch_size': [],
            'sequence_length': []
        }

    def vector_matmul(self, x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """Multiply ``x`` [batch, seq, in] by ``weight`` [out, in] (nn.Linear layout).

        Intended RISC-V vector kernel (kept for reference; never compiled)::

            vsetvli t0, %0, e16, m8
            vle16.v v0, (%1)      # load input
            vle16.v v8, (%2)      # load weights
            vfmacc.vv v16, v0, v8 # multiply-accumulate
            vse16.v v16, (%3)     # store result

        The original returned an all-zero tensor because the asm string
        was inert; compute the real product via the equivalent torch op.
        """
        return F.linear(x, weight)

    def vector_silu(self, x: torch.Tensor) -> torch.Tensor:
        """SiLU activation, SiLU(x) = x * sigmoid(x).

        The original carried a RISC-V vector listing (vfneg/vfexp/vfrec/
        vfmul) as an inert string and returned zeros; use the equivalent
        torch op instead.
        """
        return F.silu(x)

    def parallel_gate_up(self, x: torch.Tensor):
        """Run the gate and up projections concurrently on the thread pool."""
        gate_future = self.thread_pool.submit(self.gate_proj, x)
        up_future = self.thread_pool.submit(self.up_proj, x)
        return gate_future.result(), up_future.result()

    def adaptive_thread_count(self, batch_size: int, seq_length: int) -> int:
        """Thread count proportional to the token count (1 thread per 1024
        tokens), clamped to [min_threads, max_threads]."""
        return min(
            self.max_threads,
            max(self.min_threads, (batch_size * seq_length) // 1024),
        )

    def optimized_mlp(self, x: torch.Tensor):
        """Optimized MLP body: gate/up -> SiLU -> elementwise product -> down."""
        # 1. Gate and up projections (optionally in parallel); timed so
        #    perf_stats['gate_time'] actually gets populated (the original
        #    never filled it, making get_performance_stats return NaN).
        gate_start = time.time()
        if self.use_parallel:
            gate_output, up_output = self.parallel_gate_up(x)
        else:
            gate_output = self.gate_proj(x)
            up_output = self.up_proj(x)
        self.perf_stats['gate_time'].append(time.time() - gate_start)

        # 2. SiLU activation on the gate branch.
        if self.use_vector:
            gate_output = self.vector_silu(gate_output)
        else:
            gate_output = F.silu(gate_output)

        # 3. Elementwise gating.
        intermediate_output = gate_output * up_output

        # 4. Down projection.
        if self.use_vector:
            output = self.vector_matmul(intermediate_output, self.down_proj.weight)
            # F.linear above uses only the weight; add the bias when the
            # projection has one so both paths agree.
            bias = getattr(self.down_proj, 'bias', None)
            if bias is not None:
                output = output + bias
        else:
            output = self.down_proj(intermediate_output)
        return output

    def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
        """Optimized MLP forward pass with dropout + residual add.

        Timing and shape statistics are recorded inline (see class note
        on why the original decorator approach was broken).
        """
        start_time = time.time()

        # 1. Pick a thread count for this input shape.
        batch_size = hidden_states.shape[0]
        seq_length = hidden_states.shape[1]
        thread_count = self.adaptive_thread_count(batch_size, seq_length)

        # 2. Resize the pool only when the target size changed. NOTE: this
        #    tears down and recreates worker threads, which is itself costly.
        if thread_count != self.thread_pool._max_workers:
            self.thread_pool.shutdown()
            self.thread_pool = ThreadPoolExecutor(max_workers=thread_count)
            self.perf_stats['thread_count'].append(thread_count)

        # 3. Optimized MLP computation.
        output = self.optimized_mlp(hidden_states)

        # 4. Dropout and residual connection.
        output = dropout_add(output, residual, self.dropout, self.training)

        # 5. Record performance samples.
        self.perf_stats['mlp_time'].append(time.time() - start_time)
        self.perf_stats['batch_size'].append(batch_size)
        self.perf_stats['sequence_length'].append(seq_length)
        return output

    def monitor_performance(self, func):
        """Timing decorator factory, retained for API compatibility.

        Wraps ``func`` so each call appends its wall-clock duration to
        ``perf_stats['<func.__name__>_time']`` (the bucket is created on
        demand — the original raised KeyError for unknown names) plus the
        batch/sequence dimensions when the first argument is a tensor.
        """
        def wrapper(*args, **kwargs):
            start_time = time.time()
            result = func(*args, **kwargs)
            elapsed = time.time() - start_time

            self.perf_stats.setdefault(f'{func.__name__}_time', []).append(elapsed)
            if args and isinstance(args[0], torch.Tensor):
                self.perf_stats['batch_size'].append(args[0].shape[0])
                self.perf_stats['sequence_length'].append(args[0].shape[1])
            return result
        return wrapper

    def get_performance_stats(self):
        """Summarize collected MLP statistics.

        Empty sample buckets yield 0.0 rather than NaN (``np.mean([])``)
        or ZeroDivisionError, so this is safe to call at any time.
        """
        def _mean(samples):
            return float(np.mean(samples)) if samples else 0.0

        mlp_times = self.perf_stats['mlp_time']
        total_time = sum(mlp_times)
        return {
            'avg_mlp_time': _mean(mlp_times),
            'avg_gate_time': _mean(self.perf_stats['gate_time']),
            'avg_thread_count': _mean(self.perf_stats['thread_count']),
            # Forward calls per second; 0.0 when nothing was timed yet.
            'throughput': len(mlp_times) / total_time if total_time > 0 else 0.0,
        }

    def benchmark_model(self, input_sizes: List[Tuple[int, int]], num_runs: int = 100):
        """Benchmark the MLP forward over several (batch, seq) shapes.

        For each shape: 10 untimed warm-up passes, then ``num_runs`` timed
        iterations; reports average latency, tokens/second and the thread
        count chosen by the adaptive scheduler.
        """
        results = []
        for batch_size, seq_length in input_sizes:
            hidden_states = torch.randn(batch_size, seq_length, self.config.hidden_size)
            residual = torch.randn_like(hidden_states)

            # Warm-up (not timed).
            for _ in range(10):
                self.forward(hidden_states, residual)

            # Timed runs.
            start_time = time.time()
            for _ in range(num_runs):
                self.forward(hidden_states, residual)
            avg_time = (time.time() - start_time) / num_runs

            results.append({
                'batch_size': batch_size,
                'seq_length': seq_length,
                'avg_time': avg_time,
                'tokens_per_second': (batch_size * seq_length) / avg_time,
                'thread_count': self.adaptive_thread_count(batch_size, seq_length),
            })
        return results


class OptimizedKVCache:
    """Block-structured key/value cache for attention layers.

    Keys and values are zero-padded to a multiple of ``block_size`` and
    stored as ``[batch, heads, num_blocks, block_size, head_dim]`` so a
    retrieval only touches the blocks overlapping the requested token
    span. Each entry carries a ``last_used`` timestamp for age-based
    eviction via :meth:`clear_old_entries`.
    """

    def __init__(self, config: "TelechatConfig"):  # forward ref: defined earlier in the file
        self.config = config
        # layer_idx -> {'key': Tensor, 'value': Tensor, 'last_used': float}
        self.cache = {}
        self.block_size = 128  # tunable cache-block granularity

    def reshape_for_cache(self, key: torch.Tensor, value: torch.Tensor):
        """Pad and reshape KV tensors into the block layout.

        Input shape:  ``[batch, heads, seq_len, head_dim]``
        Output shape: ``[batch, heads, num_blocks, block_size, head_dim]``
        """
        batch_size, num_heads, seq_len, head_dim = key.shape
        num_blocks = (seq_len + self.block_size - 1) // self.block_size

        # Zero-pad the sequence dimension to a whole number of blocks.
        pad_len = num_blocks * self.block_size - seq_len
        if pad_len > 0:
            key = F.pad(key, (0, 0, 0, pad_len))
            value = F.pad(value, (0, 0, 0, pad_len))

        key = key.view(batch_size, num_heads, num_blocks, self.block_size, head_dim)
        value = value.view(batch_size, num_heads, num_blocks, self.block_size, head_dim)
        return key, value

    def store(self, key: torch.Tensor, value: torch.Tensor, layer_idx: int):
        """Store the KV tensors of a layer in the block-optimized layout."""
        key, value = self.reshape_for_cache(key, value)

        # Contiguous storage keeps the later block slicing cache-friendly.
        self.cache[layer_idx] = {
            'key': key.contiguous(),
            'value': value.contiguous(),
            'last_used': time.time()
        }

    def retrieve(self, layer_idx: int, start_idx: int, end_idx: int):
        """Return ``(key, value)`` for token positions ``[start_idx, end_idx)``
        of ``layer_idx``, or ``(None, None)`` when the layer is not cached."""
        if layer_idx not in self.cache:
            return None, None

        cache_entry = self.cache[layer_idx]
        key = cache_entry['key']
        value = cache_entry['value']

        # Blocks overlapping the requested span.
        start_block = start_idx // self.block_size
        end_block = (end_idx + self.block_size - 1) // self.block_size
        key = key[:, :, start_block:end_block]
        value = value[:, :, start_block:end_block]

        # Flatten back to [batch, heads, tokens, head_dim].
        batch_size, num_heads, _, _, head_dim = key.shape
        key = key.view(batch_size, num_heads, -1, head_dim)
        value = value.view(batch_size, num_heads, -1, head_dim)

        # BUG FIX: the flattened view starts at token start_block*block_size,
        # so the requested span must be re-indexed relative to that offset.
        # The original sliced with the absolute indices and returned the
        # wrong tokens whenever start_block > 0.
        offset = start_block * self.block_size
        key = key[:, :, start_idx - offset:end_idx - offset]
        value = value[:, :, start_idx - offset:end_idx - offset]

        # Refresh the LRU timestamp.
        cache_entry['last_used'] = time.time()
        return key, value

    def clear_old_entries(self, max_age: float = 300.0):
        """Drop cache entries that were unused for more than ``max_age`` seconds."""
        now = time.time()
        stale = [idx for idx, entry in self.cache.items()
                 if now - entry['last_used'] > max_age]
        for idx in stale:
            del self.cache[idx]