import torch
import torch.nn as nn
import torch.nn.functional as F

class DynamicDecay(nn.Module):
    """Dynamic multi-scale decay system.

    Produces a per-channel, strictly negative log-decay from the input by
    mixing ``scales`` learned decay profiles through a low-rank (LoRA-style)
    projection and a softmax gate over the scales.
    """
    def __init__(self, C, scales=3, lora_dim=64):
        super().__init__()
        self.scales = scales
        # One learned decay profile per scale, each of width C.
        self.dm_weights = nn.Parameter(torch.randn(scales, C))
        # Gate network: per-position soft assignment over the scales.
        self.dm_gate = nn.Linear(C, scales)
        # LoRA adapter: low-rank bottleneck C -> lora_dim -> C.
        self.w1 = nn.Linear(C, lora_dim, bias=False)
        self.w2 = nn.Linear(lora_dim, C, bias=False)

    def forward(self, x):
        """Return a decay tensor shaped like ``x`` ([B, T, C]).

        The output is guaranteed <= -0.5, so exp(decay) < 1 — a stable
        contraction factor for the WKV recurrence that consumes it.
        """
        # Soft assignment over scales: [B, T, S].
        gates = F.softmax(self.dm_gate(x), dim=-1)
        # Low-rank projection of the input: [B, T, C].
        lora = self.w2(F.gelu(self.w1(x)))
        # BUG FIX: the original multiplied by gates.sum(dim=-1), which is
        # identically 1 after softmax, so dm_gate had no effect (and got no
        # useful gradient). Apply the gate per scale instead:
        # [B,T,1,C] * [S,C] * [B,T,S,1] -> sum over S -> [B,T,C].
        multi_scale = (lora.unsqueeze(2) * self.dm_weights * gates.unsqueeze(-1)).sum(dim=2)
        # Map to a strictly negative decay: -softplus(-y) <= 0, then shift.
        decay = -F.softplus(-multi_scale) - 0.5
        return decay

class AdaptiveGroupNorm(nn.Module):
    """Group normalization with input-conditioned scale and shift.

    On top of a standard ``nn.GroupNorm``, the affine parameters are not
    fixed: a scale is derived from the batch-averaged channel activations
    and a shift from each sample's group-averaged features.
    """
    def __init__(self, groups, channels):
        super().__init__()
        self.groups = groups
        self.ln = nn.GroupNorm(groups, channels, eps=1e-5)
        # Networks that produce the dynamic affine parameters.
        self.gamma_net = nn.Linear(channels, channels)
        self.beta_net = nn.Linear(channels // groups, channels)

    def forward(self, x):
        """Normalize ``x`` ([N, C]) and apply the dynamic affine transform.

        NOTE(review): the scale depends on the *batch* mean (dim=0), so one
        sample's output depends on the others in the batch — confirm this
        cross-sample coupling is intentional before inference use.
        """
        normalized = self.ln(x)
        # Channel-wise scale in (0, 2): 1 + tanh of the batch-mean features.
        batch_mean = x.mean(dim=0)
        scale = 1 + torch.tanh(self.gamma_net(batch_mean))
        # Per-sample shift from the mean over groups: [N, C//groups] -> [N, C].
        grouped = x.view(x.size(0), self.groups, -1)
        shift = self.beta_net(grouped.mean(dim=1))
        return normalized * scale + shift

class TimeMixer(nn.Module):
    """Improved time-mixing module (RWKV-style WKV recurrence).

    Projects the input into receptance/key/value streams, runs a linear
    recurrence with input-dependent decay, then fuses two learned focus
    projections and applies adaptive group normalization.
    """
    def __init__(self, C, H):
        super().__init__()
        self.C, self.H = C, H
        self.head_size = C // H

        # Base projections for the recurrence.
        self.receptance = nn.Linear(C, C, bias=False)
        self.key = nn.Linear(C, C, bias=False)
        self.value = nn.Linear(C, C, bias=False)

        # Input-dependent decay system.
        self.decay = DynamicDecay(C)

        # Two focus units and their fusion layer.
        self.t_focus = nn.Linear(C, C)
        self.s_focus = nn.Linear(C, C)
        self.fuse = nn.Linear(2 * C, C)

        # Normalization (one group per head).
        self.agn = AdaptiveGroupNorm(H, C)

    def wkv_forward(self, r, w, k, v):
        """Sequential WKV recurrence.

        Args:
            r, k, v: [B, T, C] receptance / key / value projections.
            w: [B, T, C] log-decay; exp(w) must be < 1 for stability
               (guaranteed by DynamicDecay, whose output is <= -0.5).
        Returns:
            [B, T, C] mixed output.
        """
        B, T, C = r.shape
        H, N = self.H, self.head_size

        # Reshape to per-head tensors [B, T, H, N].
        r = r.view(B, T, H, N)
        k = k.view(B, T, H, N)
        v = v.view(B, T, H, N)
        w = torch.exp(w.view(B, T, H, N))

        # Time-varying state: one [N, N] outer-product accumulator per head.
        state = torch.zeros(B, H, N, N, device=r.device)
        outputs = []
        for t in range(T):
            curr_r = r[:, t]
            curr_k = k[:, t]
            curr_v = v[:, t]
            curr_w = w[:, t]

            # Decay the state per key-channel, then add the k ⊗ v update.
            state = state * curr_w.unsqueeze(-1) + curr_k.unsqueeze(-1) @ curr_v.unsqueeze(-2)
            # Read out with the receptance vector.
            outputs.append((state @ curr_r.unsqueeze(-1)).squeeze(-1))

        return torch.stack(outputs, dim=1).view(B, T, C)

    def forward(self, x, prev_state=None):
        """Run the time mixer.

        Args:
            x: [B, T, C] input.
            prev_state: accepted for interface compatibility but currently
                unused — the recurrence always starts from a zero state.
        Returns:
            [B, T, C] output.
        """
        B, T, C = x.shape

        # Receptance / key / value projections and the log-decay.
        r = self.receptance(x)
        k = self.key(x)
        v = self.value(x)
        w = self.decay(x)

        # WKV recurrence.
        x = self.wkv_forward(r, w, k, v)

        # Focus features. BUG FIX: the original applied s_focus to
        # x.transpose(1, 2) ([B, C, T]), so nn.Linear(C, C) received T as
        # its last dimension and crashed for any sequence length T != C.
        # Both focus projections now act on the channel dimension.
        t_feat = F.gelu(self.t_focus(x))
        s_feat = F.gelu(self.s_focus(x))
        fused = self.fuse(torch.cat([t_feat, s_feat], dim=-1))

        # Normalize (flattened to [B*T, C] for the group norm) and add the
        # fused focus features.
        x = self.agn(x.view(B * T, C)).view(B, T, C) + fused
        return x

class RWKVBlock(nn.Module):
    """Improved RWKV block: cross-layer gating, time mixing, channel mixing."""
    def __init__(self, C, H):
        super().__init__()
        self.ln1 = nn.LayerNorm(C)
        self.ln2 = nn.LayerNorm(C)
        self.time_mixer = TimeMixer(C, H)
        self.channel_mixer = nn.Sequential(
            nn.Linear(C, 4 * C),
            nn.GELU(),
            nn.Linear(4 * C, C)
        )
        # Cross-layer gate: blends this layer's input with the previous
        # layer's residual.
        self.cls_gate = nn.Linear(2 * C, C)

    def forward(self, x, prev_r=None):
        """Run one block.

        Args:
            x: [B, T, C] input.
            prev_r: optional [B, T, C] residual carried from the previous
                layer; when given, a sigmoid gate blends it with ``x``.
        Returns:
            ``(output, detached pre-gate input)`` — the second item is what
            the next layer receives as its ``prev_r``.
        """
        residual = x
        # Cross-layer blending via a learned sigmoid gate.
        if prev_r is not None:
            gate_in = torch.cat([x, prev_r], dim=-1)
            gate = torch.sigmoid(self.cls_gate(gate_in))
            x = gate * x + (1 - gate) * prev_r

        # Time mixing with a residual connection.
        x = x + self.time_mixer(self.ln1(x))

        # Channel mixing with a residual connection.
        x = x + self.channel_mixer(self.ln2(x))
        return x, residual.detach()

class RWKV(nn.Module):
    """Complete RWKV architecture: embedding, stacked blocks, LM head."""
    def __init__(self, vocab_size, n_layer=12, n_embd=768, n_head=12):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, n_embd)
        self.blocks = nn.ModuleList(
            RWKVBlock(n_embd, n_head) for _ in range(n_layer)
        )
        self.ln_out = nn.LayerNorm(n_embd)
        self.head = nn.Linear(n_embd, vocab_size)

    def forward(self, tokens, states=None):
        """Map token ids [B, T] to ``(logits [B, T, vocab], per-layer states)``."""
        # Token embedding.
        hidden = self.emb(tokens)

        # Run every block, threading the per-layer carried state through.
        new_states = []
        for idx, block in enumerate(self.blocks):
            layer_state = states[idx] if states else None
            hidden, carried = block(hidden, layer_state)
            new_states.append(carried)

        # Final norm and vocabulary projection.
        normed = self.ln_out(hidden)
        return self.head(normed), new_states

# -------------------- Training --------------------
def train_step(model, optimizer, batch):
    """One training iteration; returns the scalar loss value.

    Args:
        model: callable returning ``(logits, states)`` for the inputs.
        optimizer: torch optimizer over ``model``'s parameters.
        batch: ``(inputs, targets)`` pair.
    """
    inputs, targets = batch
    optimizer.zero_grad()

    # Forward pass: flatten batch/time so cross-entropy sees one row per token.
    logits, _ = model(inputs)
    flat_logits = logits.view(-1, logits.size(-1))
    loss = F.cross_entropy(flat_logits, targets.view(-1))

    # Backward pass and parameter update.
    loss.backward()
    optimizer.step()
    return loss.item()

# -------------------- Inference --------------------
def generate(model, prompt, max_len=50, temp=0.9, tokenizer=None):
    """Autoregressive text generation.

    Args:
        model: callable returning ``(logits, states)`` for a [B, T] token
            batch plus an optional state.
        prompt: text to condition on.
        max_len: number of new tokens to sample.
        temp: softmax temperature (> 0).
        tokenizer: object exposing ``encode(str).ids`` and ``decode(list)``.
            BUG FIX: the original read an undefined module-level global;
            it is now an explicit parameter (falling back to that global
            for backward compatibility when omitted).
    Returns:
        Decoded string of the prompt plus generated tokens.
    """
    if tokenizer is None:
        # Backward compatibility with the original module-global lookup.
        tokenizer = globals()["tokenizer"]

    model.eval()
    tokens = tokenizer.encode(prompt).ids

    with torch.no_grad():
        # BUG FIX: the original fed only tokens[-1:] on the very first step
        # with state=None, so the model never saw the rest of the prompt.
        # Condition on the full prompt once, then extend token by token.
        logits, state = model(torch.tensor([tokens]), None)
        for _ in range(max_len):
            probs = F.softmax(logits[:, -1] / temp, dim=-1)
            next_token = torch.multinomial(probs, 1).item()
            tokens.append(next_token)
            logits, state = model(torch.tensor([[next_token]]), state)
    return tokenizer.decode(tokens)

# -------------------- Initialization & usage --------------------
if __name__ == "__main__":
    # Hyperparameters
    vocab_size = 50304  # adjust to the actual vocabulary size
    model = RWKV(vocab_size)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

    # Example training loop
    # NOTE(review): `dataloader` is not defined anywhere in this file — as
    # written this raises NameError; it must be supplied by the caller.
    for epoch in range(10):
        for batch in dataloader:
            loss = train_step(model, optimizer, batch)
            print(f"Epoch {epoch} Loss: {loss:.4f}")

    # Example generation
    # NOTE(review): `generate` relies on a module-level `tokenizer`, which
    # is also undefined in this file — confirm it is injected before running.
    print(generate(model, "The future of AI is"))