import os
import types
from typing import Optional, Tuple, Union
from loguru import logger
from transformers import PreTrainedModel
from torch import nn
import torch
from torch.nn import functional as F
from transformers.activations import ACT2FN
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.utils import logging, ContextManagers
from contextlib import contextmanager
from accelerate import init_empty_weights

# Optional dependency: xformers supplies a memory-efficient attention kernel
# (used by Attention.forward during training). When it is missing we fall back
# to torch's scaled_dot_product_attention and only emit a warning.
try:
    from xformers import ops as xops
except ImportError:
    xops = None
    logger.warning(
        "Xformers is not installed correctly. If you want to use memory_efficient_attention to accelerate training use the following command to install Xformers\npip install xformers."
    )


# Module-level flag read by weight-initialization code elsewhere in the project.
_init_weights = True


@contextmanager
def no_init_weights(_enable=True):
    """Temporarily disable weight initialization inside the ``with`` body.

    While active (and ``_enable`` is truthy) the module-level ``_init_weights``
    flag is forced to ``False``; the previous value is always restored on
    exit, even if the body raises.
    """
    global _init_weights
    saved = _init_weights
    if _enable:
        _init_weights = False
    try:
        yield
    finally:
        _init_weights = saved

class RotaryEmbedding(torch.nn.Module):
    """Precomputed rotary position embedding (RoPE) cos/sin tables.

    Tables of shape [1, 1, seq_len, dim] are built once in ``__init__`` for
    ``max_position_embeddings`` positions and regrown lazily in ``forward``
    when a longer sequence shows up.
    """

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()
        # Per frequency pair: base^(-2i/dim) for i in [0, dim/2).
        self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
        self.max_seq_len_cached = max_position_embeddings
        self._build_tables(self.max_seq_len_cached, self.inv_freq.device)

    def _build_tables(self, seq_len, device):
        # Rebuild the [1, 1, seq_len, dim] float32 cos/sin lookup tables.
        positions = torch.arange(seq_len, device=self.inv_freq.device, dtype=torch.float32)
        angles = torch.outer(positions, self.inv_freq)
        duplicated = torch.cat((angles, angles), dim=-1)
        self.cos_cached = duplicated.cos()[None, None, :, :].to(torch.float32).to(device)
        self.sin_cached = duplicated.sin()[None, None, :, :].to(torch.float32).to(device)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        # Regrowing here is unlikely after the tables are built in __init__;
        # kept as a safety net for sequences longer than the cache.
        if seq_len > self.max_seq_len_cached:
            self.max_seq_len_cached = seq_len
            self._build_tables(seq_len, x.device)
        elif self.cos_cached.device != x.device:
            self.cos_cached = self.cos_cached.to(x.device)
            self.sin_cached = self.sin_cached.to(x.device)
        return (
            self.cos_cached[:, :, :seq_len, ...],
            self.sin_cached[:, :, :seq_len, ...],
        )

def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    half = x.shape[-1] // 2
    front = x[..., :half]
    back = x[..., half:]
    # Move the (negated) back half in front of the front half.
    return torch.cat((-back, front), dim=-1)


def apply_rotary_pos_emb(q, k, cos_, sin_, position_ids):
    """Apply rotary position embeddings to query/key tensors.

    ``cos_``/``sin_`` are the cached [1, 1, seq_len, dim] tables; they are
    gathered at ``position_ids`` and broadcast over the head dimension.
    The rotation is computed in float32 and cast back to each input's dtype.
    """
    # [1, 1, seq_len, dim] -> [seq_len, dim] -> gathered -> [bs, 1, seq_len, dim]
    cos = cos_.squeeze(1).squeeze(0)[position_ids].unsqueeze(1)
    sin = sin_.squeeze(1).squeeze(0)[position_ids].unsqueeze(1)
    rotated_q = q.float() * cos + rotate_half(q.float()) * sin
    rotated_k = k.float() * cos + rotate_half(k.float()) * sin
    return rotated_q.to(q.dtype), rotated_k.to(k.dtype)


class MLP(nn.Module):
    """Gated feed-forward block: ``down_proj(act(gate_proj(x)) * up_proj(x))``.

    All three projections are bias-free; ``hidden_act`` selects the gate
    activation from transformers' ``ACT2FN`` registry (e.g. ``'silu'``).
    """

    def __init__(
            self,
            hidden_size: int,
            intermediate_size: int,
            output_size: int,
            hidden_act: str,
    ):
        super().__init__()
        self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, output_size, bias=False)
        self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x):
        gated = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
        return self.down_proj(gated)
        
class Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper.

    Sizes are hard-coded for a 4096-hidden / 32-head model. Queries are
    projected from ``hidden_states`` and keys/values from
    ``kv_hidden_states`` (pass the same tensor for self-attention).
    Returns ``(attn_output, attn_weights, past_key_value)``; per-head
    attention weights are never materialized by either kernel, so
    ``attn_weights`` is always ``None``.
    """

    def __init__(self):
        super().__init__()

        self.hidden_size = 4096
        self.num_heads = 32
        self.head_dim = self.hidden_size // self.num_heads
        self.max_position_embeddings = 4096

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
        self.rotary_emb = RotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # [bsz, seq, hidden] -> [bsz, num_heads, seq, head_dim]
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
            self,
            hidden_states: torch.Tensor,
            kv_hidden_states: torch.Tensor,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            past_key_value: Optional[Tuple[torch.Tensor]] = None,
            output_attentions: bool = False,
            use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()
        _, kv_len, _ = kv_hidden_states.size()

        # BUGFIX: keys/values were previously projected from `hidden_states`
        # while being reshaped with `kv_len`, which silently ignored
        # `kv_hidden_states` and crashed whenever kv_len != q_len. Project
        # from `kv_hidden_states` as the kv_len-based views clearly intend.
        # [bsz, seq, hidden] -> [bsz, num_heads, seq, head_dim]
        query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = self.k_proj(kv_hidden_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = self.v_proj(kv_hidden_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        # NOTE(review): the same gathered cos/sin table is applied to both q
        # and k, which assumes q_len == kv_len — confirm before using this
        # for true cross-attention with differing lengths.
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
        # [bsz, nh, t, hd]

        if past_key_value is not None:
            # Reuse cached k, v from previous decoding steps.
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        # BUGFIX: `attn_weights` was only assigned on the xformers path, so
        # requesting output_attentions on the SDPA path raised NameError.
        attn_weights = None
        if xops is not None and self.training:
            # xformers expects [bsz, seq, heads, head_dim].
            query_states = query_states.transpose(1, 2)
            key_states = key_states.transpose(1, 2)
            value_states = value_states.transpose(1, 2)
            attn_output = xops.memory_efficient_attention(
                query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask()
            )
        else:
            with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True):
                attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask)
            attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


def custom_forward(
            self,
            doc_embeddings,
            input_ids,
            attention_mask,
            position_ids ,
            past_key_values ,
            inputs_embeds ,
            use_cache ,
            output_attentions ,
            output_hidden_states ,
            return_dict,
    ) :
        """Replacement decoder-stack forward that prepends document embeddings.

        Intended to be monkey-patched onto a decoder model: ``self`` must
        provide ``config``, ``embed_tokens``, ``layers``, ``norm``,
        ``gradient_checkpointing`` and ``_prepare_decoder_attention_mask``.
        ``doc_embeddings`` is concatenated in front of the embedded tokens
        along the sequence dimension, so the returned hidden states are
        longer than ``input_ids`` by the number of document positions.
        Assumes doc_embeddings is [batch, n_docs, hidden] — TODO confirm.
        """
        # print('wtf doc embedding',doc_embeddings)
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")

        # TODO: doc_embedding must be injected here, at the FRONT of the
        # sequence, for two reasons:
        # 1. If appended at the end, the KV cache becomes hard to handle.
        # 2. seq_length_with_past is only used to build the attention matrix,
        #    so it must also account for the number of doc_embedding positions.
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        # print('custom forward,input_embeds',inputs_embeds,inputs_embeds.shape)
        # Prepend the document embeddings; seq_length below therefore already
        # includes the document positions.
        inputs_embeds = torch.cat((doc_embeddings, inputs_embeds), dim=1)
        # print('custom forward,input_embeds',inputs_embeds,inputs_embeds.shape)
        batch_size, seq_length, _ = inputs_embeds.shape


        seq_length_with_past = seq_length
        past_key_values_length = 0

        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        # print('attention_mask',attention_mask)
        # embed positions
        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
            )
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
        )
        # print('attention_mask',attention_mask)
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                # logger.warning_once(
                #     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                # )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                # NOTE: this inner name shadows the module-level
                # `custom_forward` only inside this closure.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, None)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )



import sys
sys.path.append("../..")
from baichuan_py.modeling_baichuan import BaichuanPreTrainedModel,BaichuanModel,NormHead
from transformers import GenerationConfig,PretrainedConfig,BertModel,BertConfig
class fuseModelWithAppendToBegin(BaichuanPreTrainedModel):
    """Causal LM that prepends projected retrieval-document embeddings.

    A gradient-free BERT encoder (``sent2vec``) embeds each sample's
    retrieved documents; an MLP (``projecter``) maps them from the encoder's
    1024-dim space to the LLM's 4096-dim space; the result is handed to
    ``self.model`` via its ``doc_embeddings`` argument (see the patched
    ``custom_forward``), which prepends them to the token embeddings.
    """

    def __init__(self, config=None, *model_args, **model_kwargs):
        super().__init__(config)
        # NOTE(review): the original comment said this wiring "looks right
        # but the intermediate layers come out different" — behavior kept
        # as-is; investigate before relying on layer-wise comparisons.
        self.model = BaichuanModel(config)
        self.sent2vec = BertModel(BertConfig.from_pretrained('/hy-tmp/thenlper/gte-large-zh/config.json'))
        self.lm_head = NormHead(config.hidden_size, config.vocab_size, bias=False)
        # Projects 1024-dim sentence embeddings into the LLM's 4096-dim space.
        self.projecter = MLP(hidden_size=1024, intermediate_size=11008, output_size=4096, hidden_act='silu')
        self.post_init()

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
        *model_args,
        config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
        cache_dir: Optional[Union[str, os.PathLike]] = None,
        ignore_mismatched_sizes: bool = False,
        force_download: bool = False,
        local_files_only: bool = False,
        token: Optional[Union[str, bool]] = None,
        revision: str = "main",
        use_safetensors: bool = None,
        **kwargs,
    ):
        """Load the config first (if not supplied), then defer to the parent.

        Resolving the config up front mirrors transformers' own loading flow
        so that ``cls.config_class`` is honored before weights are fetched.
        """
        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=False,
                proxies=None,
                local_files_only=local_files_only,
                token=token,
                revision=revision,
                subfolder="",
                _from_auto=False,
                _from_pipeline=None,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

        return super(fuseModelWithAppendToBegin, cls).from_pretrained(pretrained_model_name_or_path, *model_args,
                config=config, cache_dir=cache_dir, ignore_mismatched_sizes=ignore_mismatched_sizes,
                force_download=force_download, local_files_only=local_files_only, token=token, revision=revision,
                use_safetensors=use_safetensors, **kwargs)

    def forward(self, input_ids_for_sent2vec, input_ids_for_llm,
                position_ids=None,
                past_key_values=None,
                inputs_embeds=None,
                labels=None,
                use_cache=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=None,
                ):
        """Encode retrieved docs, prepend them to the LLM input, compute loss.

        ``input_ids_for_sent2vec`` is iterated per sample; each element is a
        BERT-style encoding dict (one sample may carry several retrieved
        documents — presumably shaped document_size x seq_len; verify against
        the caller). ``labels`` (optional) triggers a shifted cross-entropy
        loss plus the config-weighted z-loss regularizer.
        """
        # Embed each sample's documents with the gradient-free sentence
        # encoder and mean-pool over tokens.
        with torch.no_grad():
            doc_embeddings = []
            for encoded_docs in input_ids_for_sent2vec:
                model_output = self.sent2vec(**encoded_docs)
                # One sample's output: document_size x hidden.
                mean_output = self.mean_pooling(model_output, encoded_docs['attention_mask'])
                # BUGFIX: this append was previously indented outside the
                # loop, so only the LAST sample's document embeddings were
                # kept (len(doc_embeddings) was always 1).
                doc_embeddings.append(mean_output)

        # Project each sample's embeddings into the LLM's hidden space.
        for i, d in enumerate(doc_embeddings):
            doc_embeddings[i] = self.projecter(d)
        if torch.distributed.get_rank() == 0:
            print('len(doc_embeddings)', len(doc_embeddings), doc_embeddings[0].shape)
        # bsz x document_size x hidden
        doc_embeddings = torch.stack(doc_embeddings)
        if torch.distributed.get_rank() == 0:
            print('doc_embeddings.shape', doc_embeddings.shape)

        # self.model's forward is the patched custom_forward, which accepts
        # doc_embeddings and prepends them to the token embeddings.
        outputs = self.model(
            doc_embeddings=doc_embeddings,
            input_ids=input_ids_for_llm.input_ids,
            # attention_mask must stay None here; passing the tokenizer mask
            # breaks the patched forward (its length excludes doc positions).
            attention_mask=None,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            # BUGFIX: the rank-0 debug prints previously dereferenced
            # labels.shape BEFORE the None check and crashed when labels
            # were omitted; they are now guarded by it.
            if torch.distributed.get_rank() == 0:
                print('labels', labels, labels.shape)
                print('logits', logits, logits.shape)
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            if torch.distributed.get_rank() == 0:
                print('shift_logits', shift_logits, shift_logits.shape)
                print('shift_labels', shift_labels, shift_labels.shape)
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)

            if torch.distributed.get_rank() == 0:
                print('after shift_logits', shift_logits, shift_logits.shape)
                print('after shift_labels', shift_labels, shift_labels.shape)

            # z-loss: penalizes large softmax normalizers for stability.
            softmax_normalizer = shift_logits.max(-1).values ** 2
            z_loss = self.config.z_loss_weight * softmax_normalizer.mean()
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels) + z_loss

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def mean_pooling(self, model_output, attention_mask):
        """Attention-mask-aware mean over token embeddings, cast to bfloat16.

        Pad positions produce nonzero (and inconsistent) hidden states, so
        they are zeroed via the mask before averaging; the divisor counts
        only real tokens.
        """
        token_embeddings = model_output.last_hidden_state
        # [bsz, seq] -> [bsz, seq, 1] -> broadcast to [bsz, seq, hidden].
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        # clamp guards against division by zero for all-pad rows.
        summed = torch.sum(token_embeddings * input_mask_expanded, 1)
        counts = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
        return (summed / counts).bfloat16()

    