# coding=utf-8
# Copyright 2023 Bo Peng and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Hierarchical RWKV7 model."""

import math
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Union, Tuple, List

import torch
import torch.utils.checkpoint
from torch import nn
import torch.nn.functional as F

from ...generation import GenerationMixin
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_utils import PreTrainedModel
from ...utils import (
    ModelOutput,
    auto_docstring,
    is_bitsandbytes_available,
    is_ninja_available,
    is_torch_cuda_available,
    logging,
)
from .configuration_rwkv import RwkvConfig


logger = logging.get_logger(__name__)


class HierarchicalRwkvConfig(RwkvConfig):
    """
    Configuration class for the hierarchical RWKV7 model.

    Extends `RwkvConfig` with the hierarchical-reasoning settings.

    Args:
        low_level_layers (int): number of low-level reasoning layers, default 4 (layers 0-3)
        high_level_layers (int): number of high-level reasoning layers, default 7 (layers 4-10)
        N_iterations (int): number of hierarchical iterations N, default 2
        T_interval (int): high-level update interval T, default 2
        N_supervision (int): number of deep-supervision steps, default 3
        use_hierarchical_state (bool): whether to use hierarchical state management, default True
        state_fusion_method (str): state fusion method, one of 'concat', 'add', 'gate'; default 'gate'
    """

    def __init__(
        self,
        low_level_layers: int = 4,
        high_level_layers: int = 7,
        N_iterations: int = 2,
        T_interval: int = 2,
        N_supervision: int = 3,
        use_hierarchical_state: bool = True,
        state_fusion_method: str = "gate",  # 'concat', 'add', 'gate'
        **kwargs,
    ):
        # NOTE: the previous version used @dataclass + __post_init__, but
        # `PretrainedConfig` subclasses are not dataclasses: the generated
        # __init__ dropped **kwargs (vocab_size, num_hidden_layers, ...) and
        # super().__post_init__() raised AttributeError because RwkvConfig
        # defines no __post_init__. A plain __init__ keeps the same keyword
        # interface and defaults while staying compatible with the base config.
        super().__init__(**kwargs)
        self.low_level_layers = low_level_layers
        self.high_level_layers = high_level_layers
        self.N_iterations = N_iterations
        self.T_interval = T_interval
        self.N_supervision = N_supervision
        self.use_hierarchical_state = use_hierarchical_state
        self.state_fusion_method = state_fusion_method

        # Keep the level split consistent with the total layer count.
        if self.low_level_layers + self.high_level_layers != self.num_hidden_layers:
            logger.warning(f"层级配置不匹配：low_level_layers({self.low_level_layers}) + "
                         f"high_level_layers({self.high_level_layers}) != "
                         f"num_hidden_layers({self.num_hidden_layers})")
            # Auto-adjust the high-level layer count to fit.
            self.high_level_layers = self.num_hidden_layers - self.low_level_layers


class StateFusion(nn.Module):
    """Fuses the high-level (zH) and low-level (zL) hierarchical states.

    Supported fusion strategies:
      * ``"gate"``   -- learned sigmoid gate interpolating between zH and zL
      * ``"concat"`` -- linear projection of the concatenated states
      * ``"add"``    -- layer-normalized element-wise sum
    """

    def __init__(self, hidden_size: int, method: str = "gate"):
        super().__init__()
        self.method = method
        self.hidden_size = hidden_size

        if method == "add":
            self.norm = nn.LayerNorm(hidden_size)
        elif method == "concat":
            self.projection = nn.Linear(hidden_size * 2, hidden_size)
        elif method == "gate":
            # Produces a per-feature mixing coefficient in (0, 1).
            self.gate = nn.Sequential(
                nn.Linear(hidden_size * 2, hidden_size),
                nn.Sigmoid()
            )
        else:
            raise ValueError(f"不支持的状态融合方法: {method}")

    def forward(self, zH: torch.Tensor, zL: torch.Tensor) -> torch.Tensor:
        """Fuse the high-level state ``zH`` with the low-level state ``zL``.

        Both inputs and the result have shape [batch_size, seq_len, hidden_size].
        """
        method = self.method
        if method == "gate":
            stacked = torch.cat((zH, zL), dim=-1)
            alpha = self.gate(stacked)
            # Convex combination: alpha weights zH, (1 - alpha) weights zL.
            return alpha * zH + (1 - alpha) * zL
        if method == "concat":
            return self.projection(torch.cat((zH, zL), dim=-1))
        if method == "add":
            return self.norm(zH + zL)
        raise ValueError(f"不支持的状态融合方法: {method}")


class HierarchicalRwkvBlock(GradientCheckpointingLayer):
    """Hierarchical RWKV block supporting high/low level state interaction.

    Wraps a standard RWKV attention + feed-forward block and, when hierarchical
    state is enabled, lets the block attend to the other level's state before
    the RWKV computation.
    """

    def __init__(self, config: HierarchicalRwkvConfig, layer_id: int, level: str = "low"):
        super().__init__()
        self.config = config
        self.layer_id = layer_id
        self.level = level  # "low" or "high"

        # Base RWKV block components; pre_ln is only applied on the very first layer.
        if layer_id == 0:
            self.pre_ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)

        self.ln1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.ln2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)

        # Components reused from the original RWKV implementation.
        from .modeling_rwkv import RwkvSelfAttention, RwkvFeedForward
        self.attention = RwkvSelfAttention(config, layer_id)
        self.feed_forward = RwkvFeedForward(config, layer_id)

        # Hierarchical interaction components.
        if config.use_hierarchical_state:
            # NOTE(review): this per-block StateFusion is never used in forward()
            # (the model-level fusion happens in HierarchicalRwkvModel) — kept
            # for state_dict compatibility; confirm whether it is intentional.
            self.state_fusion = StateFusion(config.hidden_size, config.state_fusion_method)

            # Cross-level attention for state interaction. The original code
            # duplicated byte-identical branches for "low" and "high"; both
            # levels use the same wiring, so build it once. max(1, ...) guards
            # against hidden_size < 64 yielding num_heads == 0 (which would
            # make nn.MultiheadAttention raise); for hidden_size >= 64 the head
            # count is unchanged.
            self.cross_attention = nn.MultiheadAttention(
                config.hidden_size,
                num_heads=max(1, config.hidden_size // 64),
                batch_first=True,
            )

    def forward(self, hidden, state=None, use_cache=False, output_attentions=False, 
                cross_state=None, level_state=None):
        """
        Hierarchical RWKV block forward pass.

        Args:
            hidden: input hidden states
            state: RWKV recurrent state
            use_cache: whether to cache the recurrent state
            output_attentions: whether to include the attention output in the result
            cross_state: the other level's state (zH for low level, zL for high level)
            level_state: same-level state (currently unused by this block)

        Returns:
            (hidden, state, attention-or-None)
        """
        if self.layer_id == 0:
            hidden = self.pre_ln(hidden)

        # Cross-level state interaction: attend to the other level's state and
        # add the result residually (identical formula for both levels).
        if self.config.use_hierarchical_state and cross_state is not None:
            cross_attn_out, _ = self.cross_attention(hidden, cross_state, cross_state)
            hidden = hidden + cross_attn_out

        # Standard RWKV processing: attention then feed-forward, each residual.
        attention, state = self.attention(self.ln1(hidden), state=state, use_cache=use_cache)
        hidden = hidden + attention

        feed_forward, state = self.feed_forward(self.ln2(hidden), state=state)
        hidden = hidden + feed_forward

        outputs = (hidden, state)
        if output_attentions:
            outputs += (attention,)
        else:
            outputs += (None,)

        return outputs


def hrm_forward(zH: torch.Tensor, zL: torch.Tensor, x: torch.Tensor, 
                low_blocks: nn.ModuleList, high_blocks: nn.ModuleList,
                config: "HierarchicalRwkvConfig", 
                low_states: Optional[List[torch.Tensor]] = None,
                high_states: Optional[List[torch.Tensor]] = None,
                use_cache: bool = False) -> Tuple[torch.Tensor, torch.Tensor, List, List]:
    """
    Hierarchical Reasoning Model (HRM) forward pass.

    Runs `N_iterations * T_interval - 1` low-level sweeps without gradients
    (updating the high level every `T_interval` sweeps), then one final
    low-level + high-level sweep with gradients enabled (one-step gradient).

    Args:
        zH: high-level state
        zL: low-level state
        x: input embeddings
        low_blocks: low-level RWKV blocks
        high_blocks: high-level RWKV blocks
        config: hierarchical configuration (uses N_iterations and T_interval)
        low_states: per-block low-level RWKV states (mutated in place when given)
        high_states: per-block high-level RWKV states (mutated in place when given)
        use_cache: whether the blocks should cache their recurrent state

    Returns:
        (zH_new, zL_new, low_states_new, high_states_new)
    """
    # Initialize per-block recurrent states when none were supplied.
    if low_states is None:
        low_states = [None] * len(low_blocks)
    if high_states is None:
        high_states = [None] * len(high_blocks)

    def _sweep(blocks, states, hidden, cross_state, level_state):
        # Run `hidden` through every block of one level, updating that level's
        # per-block states in place. (Replaces four duplicated loops.)
        for j, block in enumerate(blocks):
            hidden, states[j], _ = block(
                hidden,
                state=states[j],
                use_cache=use_cache,
                cross_state=cross_state,
                level_state=level_state,
            )
        return hidden

    # All but the last iteration run without gradients.
    with torch.no_grad():
        for i in range(config.N_iterations * config.T_interval - 1):
            # Low-level sweep: fuse the input with the current low-level state.
            zL = _sweep(low_blocks, low_states, x + zL, zH, zL)

            # Periodically refresh the high level from the low-level result.
            if (i + 1) % config.T_interval == 0:
                zH = _sweep(high_blocks, high_states, zL, zL, zH)

    # Final step with gradients enabled: one low sweep, then one high sweep.
    zL = _sweep(low_blocks, low_states, x + zL, zH, zL)
    zH = _sweep(high_blocks, high_states, zL, zL, zH)

    return zH, zL, low_states, high_states


@auto_docstring
class HierarchicalRwkvPreTrainedModel(PreTrainedModel):
    """Base class for hierarchical RWKV pretrained models.

    Supplies the shared configuration type, weight initialization and the
    checkpointing/statefulness flags used by all hierarchical RWKV variants.
    """

    config: HierarchicalRwkvConfig
    base_model_prefix = "hierarchical_rwkv"
    _no_split_modules = ["HierarchicalRwkvBlock"]
    _keep_in_fp32_modules = ["time_decay", "time_first"]
    supports_gradient_checkpointing = True
    _is_stateful = True

    def _init_weights(self, module: nn.Module):
        """Initialize `module` exactly like the base (non-hierarchical) RWKV model."""
        # Local import mirrors the deferred import used elsewhere in this file.
        from .modeling_rwkv import RwkvPreTrainedModel
        RwkvPreTrainedModel._init_weights(self, module)


@dataclass
@auto_docstring(
    custom_intro="""
    Class for the Hierarchical RWKV model outputs.
    """
)
class HierarchicalRwkvOutput(ModelOutput):
    """Output of `HierarchicalRwkvModel.forward`."""
    
    # Fused hidden states after the output layer norm.
    last_hidden_state: Optional[torch.FloatTensor] = None
    # High-level hierarchical state (zH) returned by hrm_forward.
    high_level_state: Optional[torch.FloatTensor] = None
    # Low-level hierarchical state (zL) returned by hrm_forward.
    low_level_state: Optional[torch.FloatTensor] = None
    # Combined low + high per-level RWKV recurrent states.
    state: Optional[list[torch.FloatTensor]] = None
    # Always None in the current implementation (intermediate hidden states are not collected).
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    # Always None in the current implementation (attentions are not collected).
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for hierarchical causal language model outputs.
    """
)
class HierarchicalRwkvCausalLMOutput(ModelOutput):
    """Output of `HierarchicalRwkvForCausalLM.forward`."""
    
    # Language-modeling loss (when labels were provided).
    loss: Optional[torch.FloatTensor] = None
    # Next-token prediction logits over the vocabulary.
    logits: Optional[torch.FloatTensor] = None
    # High-level hierarchical state (zH) from the backbone.
    high_level_state: Optional[torch.FloatTensor] = None
    # Low-level hierarchical state (zL) from the backbone.
    low_level_state: Optional[torch.FloatTensor] = None
    # Combined low + high per-level RWKV recurrent states.
    state: Optional[list[torch.FloatTensor]] = None
    # Forwarded from the backbone output; None in the current implementation.
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    # Forwarded from the backbone output; None in the current implementation.
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@auto_docstring
class HierarchicalRwkvModel(HierarchicalRwkvPreTrainedModel):
    """Hierarchical RWKV backbone: embeddings + low/high level block stacks + state fusion."""
    
    def __init__(self, config: HierarchicalRwkvConfig):
        super().__init__(config)
        
        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        
        # RWKV blocks split into two levels; layer_id stays globally unique
        # across both stacks so rescaling can address layers by absolute index.
        self.low_blocks = nn.ModuleList([
            HierarchicalRwkvBlock(config, layer_id=idx, level="low") 
            for idx in range(config.low_level_layers)
        ])
        
        self.high_blocks = nn.ModuleList([
            HierarchicalRwkvBlock(config, layer_id=idx + config.low_level_layers, level="high")
            for idx in range(config.high_level_layers)
        ])
        
        # NOTE(review): unlike ln1/ln2 in the blocks, eps=config.layer_norm_epsilon
        # is not passed here — confirm whether the default eps is intentional.
        self.ln_out = nn.LayerNorm(config.hidden_size)
        
        # Fuses the final zH/zL states into one hidden-state tensor.
        self.state_fusion = StateFusion(config.hidden_size, config.state_fusion_method)
        
        self.layers_are_rescaled = False
        self.gradient_checkpointing = False
        
        # Initialize weights and apply final processing.
        self.post_init()
    
    def get_input_embeddings(self):
        return self.embeddings
    
    def set_input_embeddings(self, new_embeddings):
        self.embeddings = new_embeddings
    
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        state: Optional[list[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        zH: Optional[torch.FloatTensor] = None,
        zL: Optional[torch.FloatTensor] = None,
    ) -> Union[tuple, HierarchicalRwkvOutput]:
        """
        Hierarchical RWKV forward pass.
        
        Args:
            input_ids: input token ids (mutually exclusive with inputs_embeds)
            attention_mask: attention mask (unused; a warning is emitted if given)
            inputs_embeds: precomputed input embeddings
            state: RWKV recurrent state
            use_cache: whether to initialize/return the recurrent state
            output_attentions: whether to output attentions (currently always None)
            output_hidden_states: whether to output hidden states (currently always None)
            return_dict: whether to return a HierarchicalRwkvOutput
            zH: high-level hierarchical state (defaults to zeros)
            zL: low-level hierarchical state (defaults to zeros)
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        # Caching is disabled during training unless explicitly requested.
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        
        if attention_mask is not None:
            logger.warning_once("`attention_mask` was passed, but it is unused in this model.")
        
        # Re-apply weight (re)scaling whenever we switch between train and eval.
        if self.training == self.layers_are_rescaled:
            self._rescale_layers()
        
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        
        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        
        batch_size, seq_len, hidden_size = inputs_embeds.shape
        
        # Hierarchical states default to zeros on the first call.
        if zH is None:
            zH = torch.zeros_like(inputs_embeds)
        if zL is None:
            zL = torch.zeros_like(inputs_embeds)
        
        # Initialize the RWKV recurrent state: a list of five tensors, each
        # shaped (batch, hidden_size, num_hidden_layers); the first two are in
        # the model dtype, the rest in float32. state[4] starts at -1e30
        # (acts as -inf) — presumably a running max; confirm against modeling_rwkv.
        if use_cache and state is None:
            shape = (inputs_embeds.size(0), self.config.hidden_size, self.config.num_hidden_layers)
            state = [
                torch.zeros(
                    *shape, dtype=inputs_embeds.dtype if i <= 1 else torch.float32, device=inputs_embeds.device
                )
                for i in range(5)
            ]
            state[4] -= 1e30
        
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False
        
        # Split the recurrent state between the two levels.
        # NOTE(review): `state` is a list of 5 state *kinds* (each tensor spans
        # all layers in its last dim), so slicing it by layer counts hands each
        # block a single tensor rather than a per-layer state list — verify this
        # matches what the RWKV attention/feed-forward components expect.
        low_states = state[:self.config.low_level_layers] if state else [None] * self.config.low_level_layers
        high_states = state[self.config.low_level_layers:] if state else [None] * self.config.high_level_layers
        
        # Hierarchical model forward pass.
        zH, zL, low_states, high_states = hrm_forward(
            zH, zL, inputs_embeds,
            self.low_blocks, self.high_blocks,
            self.config,
            low_states, high_states,
            use_cache
        )
        
        # Fuse the two level states (or fall back to the high-level state).
        if self.config.use_hierarchical_state:
            hidden_states = self.state_fusion(zH, zL)
        else:
            hidden_states = zH  # default to the high-level state
        
        # Output layer normalization.
        hidden_states = self.ln_out(hidden_states)
        
        # Recombine the per-level states.
        combined_state = low_states + high_states
        
        # Tuple return drops None entries (hidden_states / attentions are never collected).
        if not return_dict:
            return tuple(x for x in [hidden_states, zH, zL, combined_state, None, None] if x is not None)
        
        return HierarchicalRwkvOutput(
            last_hidden_state=hidden_states,
            high_level_state=zH,
            low_level_state=zL,
            state=combined_state,
            hidden_states=None,
            attentions=None,
        )
    
    def _rescale_layers(self):
        """Rescale block output weights by 2^(layer // rescale_every).

        Mirrors the original RWKV rescaling: multiply when entering training,
        divide (or divide the quantized SCB scale — presumably the
        bitsandbytes 8-bit path; confirm) when entering eval.
        """
        if self.layers_are_rescaled == (not self.training):
            return
        
        if self.config.rescale_every > 0:
            with torch.no_grad():
                # Rescale the low-level blocks (absolute layer index == block_id).
                for block_id, block in enumerate(self.low_blocks):
                    if self.training:
                        block.attention.output.weight.mul_(2 ** int(block_id // self.config.rescale_every))
                        block.feed_forward.value.weight.mul_(2 ** int(block_id // self.config.rescale_every))
                    else:
                        if hasattr(block.attention.output.weight, "SCB"):
                            block.attention.output.weight.SCB.div_(2 ** int(block_id // self.config.rescale_every))
                            block.feed_forward.value.weight.SCB.div_(2 ** int(block_id // self.config.rescale_every))
                        else:
                            block.attention.output.weight.div_(2 ** int(block_id // self.config.rescale_every))
                            block.feed_forward.value.weight.div_(2 ** int(block_id // self.config.rescale_every))
                
                # Rescale the high-level blocks using their absolute layer index.
                for block_id, block in enumerate(self.high_blocks):
                    actual_block_id = block_id + self.config.low_level_layers
                    if self.training:
                        block.attention.output.weight.mul_(2 ** int(actual_block_id // self.config.rescale_every))
                        block.feed_forward.value.weight.mul_(2 ** int(actual_block_id // self.config.rescale_every))
                    else:
                        if hasattr(block.attention.output.weight, "SCB"):
                            block.attention.output.weight.SCB.div_(2 ** int(actual_block_id // self.config.rescale_every))
                            block.feed_forward.value.weight.SCB.div_(2 ** int(actual_block_id // self.config.rescale_every))
                        else:
                            block.attention.output.weight.div_(2 ** int(actual_block_id // self.config.rescale_every))
                            block.feed_forward.value.weight.div_(2 ** int(actual_block_id // self.config.rescale_every))
        
        self.layers_are_rescaled = not self.training


@auto_docstring(
    custom_intro="""
    The Hierarchical RWKV Model transformer with a language modeling head on top.
    """
)
class HierarchicalRwkvForCausalLM(HierarchicalRwkvPreTrainedModel, GenerationMixin):
    """Hierarchical RWKV causal language model: backbone + linear LM head."""

    _tied_weights_keys = ["head.weight"]

    def __init__(self, config: HierarchicalRwkvConfig):
        super().__init__(config)
        self.hierarchical_rwkv = HierarchicalRwkvModel(config)
        # Projects hidden states to vocabulary logits.
        self.head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing.
        self.post_init()

    def get_output_embeddings(self):
        return self.head

    def set_output_embeddings(self, new_embeddings):
        self.head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, state=None, inputs_embeds=None, 
                                    use_cache=None, zH=None, zL=None, **kwargs):
        """Assemble the model inputs for one generation step.

        Once a recurrent `state` exists, only the last token is fed — RWKV
        carries the sequence history in `state`.

        NOTE(review): zH/zL are passed through unchanged; GenerationMixin does
        not refresh them from the previous step's outputs, so they remain None
        throughout generate() — confirm whether the hierarchical states should
        be threaded through model_kwargs.
        """
        if state is not None:
            input_ids = input_ids[:, -1].unsqueeze(-1)

        # inputs_embeds can only seed the very first step (before any state exists).
        if inputs_embeds is not None and state is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs["state"] = state
        model_inputs["use_cache"] = use_cache
        model_inputs["zH"] = zH
        model_inputs["zL"] = zL
        return model_inputs

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        state: Optional[list[torch.FloatTensor]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        zH: Optional[torch.FloatTensor] = None,
        zL: Optional[torch.FloatTensor] = None,
        **kwargs,
    ) -> Union[tuple, HierarchicalRwkvCausalLMOutput]:
        """Run the backbone, project to logits, and optionally compute the LM loss.

        Args:
            labels: target token ids for next-token prediction; positions set to
                -100 are ignored. Labels are shifted internally, so pass
                labels identical to input_ids.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        hierarchical_outputs = self.hierarchical_rwkv(
            input_ids,
            # Forwarded so the backbone can emit its "unused" warning instead of
            # the argument being silently dropped here.
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            state=state,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            zH=zH,
            zL=zL,
        )

        hidden_states = hierarchical_outputs[0]
        logits = self.head(hidden_states)

        loss = None
        if labels is not None:
            # Move labels to the logits device (supports model parallelism).
            labels = labels.to(logits.device)
            # Shift so that tokens < n predict token n.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = F.cross_entropy(
                shift_logits.view(-1, shift_logits.size(-1)), 
                shift_labels.view(-1), 
                ignore_index=-100
            )

        if not return_dict:
            output = (logits,) + hierarchical_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return HierarchicalRwkvCausalLMOutput(
            loss=loss,
            logits=logits,
            high_level_state=hierarchical_outputs.high_level_state,
            low_level_state=hierarchical_outputs.low_level_state,
            state=hierarchical_outputs.state,
            hidden_states=hierarchical_outputs.hidden_states,
            attentions=hierarchical_outputs.attentions,
        )


class DeepSupervisionTrainer:
    """Deep-supervision trainer.

    Runs several supervised forward/backward passes per batch, feeding the
    (detached) hierarchical states of each pass into the next one.
    """

    def __init__(self, model: "HierarchicalRwkvForCausalLM", config: "HierarchicalRwkvConfig"):
        self.model = model
        self.config = config
        # Number of supervised passes per training step.
        self.N_supervision = config.N_supervision

    def train_step(self, input_ids: torch.LongTensor, labels: torch.LongTensor, 
                   optimizer: torch.optim.Optimizer) -> dict:
        """
        Run one deep-supervision training step.

        Args:
            input_ids: input token ids [batch_size, seq_len]
            labels: target token ids [batch_size, seq_len]
            optimizer: optimizer stepped once per supervised pass

        Returns:
            dict with float "total_loss" and "avg_loss", the per-pass
            "supervision_losses", and the final zH/zL norms.
        """
        batch_size, seq_len = input_ids.shape
        device = input_ids.device

        # Initial hierarchical states.
        # NOTE(review): created in the default dtype (float32); confirm this
        # matches the model dtype under mixed precision.
        zH = torch.zeros(batch_size, seq_len, self.config.hidden_size, device=device)
        zL = torch.zeros(batch_size, seq_len, self.config.hidden_size, device=device)

        # Accumulate plain floats. The previous `total_loss += loss` kept the
        # loss *tensors* (and their autograd metadata) alive across all passes.
        total_loss = 0.0
        supervision_losses = []

        # Deep-supervision loop.
        for step in range(self.N_supervision):
            outputs = self.model(
                input_ids=input_ids,
                labels=labels,
                zH=zH,
                zL=zL,
                return_dict=True
            )

            loss = outputs.loss
            loss_value = loss.item()
            supervision_losses.append(loss_value)
            total_loss += loss_value

            # One optimizer update per supervised pass.
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            # Detach the states so gradients do not flow across passes.
            zH = outputs.high_level_state.detach()
            zL = outputs.low_level_state.detach()

        avg_loss = total_loss / self.N_supervision

        return {
            "total_loss": total_loss,
            "avg_loss": avg_loss,
            "supervision_losses": supervision_losses,
            "final_zH_norm": torch.norm(zH).item(),
            "final_zL_norm": torch.norm(zL).item()
        }


# Public API of this module.
__all__ = [
    "HierarchicalRwkvConfig",
    "HierarchicalRwkvModel", 
    "HierarchicalRwkvForCausalLM",
    "HierarchicalRwkvPreTrainedModel",
    "DeepSupervisionTrainer",
    "hrm_forward",
    "StateFusion"
]
