import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import math

# ----------------------------
# 1. 多头注意力（你提供的代码）
# ----------------------------
class MultiHeadAttention(nn.Module):
    """Multi-head self-attention layer.

    Projects queries/keys/values with separate linear maps, splits the
    model dimension into ``num_heads`` independent heads, applies scaled
    dot-product attention per head in parallel, then concatenates the
    heads and applies a final output projection.
    """

    def __init__(self, d_model, num_heads):
        super().__init__()
        self.num_heads = num_heads
        self.d_model = d_model
        assert d_model % num_heads == 0, "d_model必须能被num_heads整除"

        # Per-head feature size.
        self.depth = d_model // num_heads

        # Input projections for query, key and value.
        self.wq = nn.Linear(d_model, d_model)
        self.wk = nn.Linear(d_model, d_model)
        self.wv = nn.Linear(d_model, d_model)

        # Output projection applied after head concatenation.
        self.dense = nn.Linear(d_model, d_model)

    def split_heads(self, x, batch_size):
        """Reshape (batch, seq_len, d_model) -> (batch, num_heads, seq_len, depth)."""
        x = x.view(batch_size, -1, self.num_heads, self.depth)
        return x.permute(0, 2, 1, 3)

    def forward(self, q, k, v, mask=None):
        """Run multi-head attention.

        Args:
            q, k, v: tensors of shape (batch, seq_len, d_model).
            mask: optional tensor broadcastable to
                (batch, num_heads, seq_len_q, seq_len_k) where 1 marks
                positions that MAY be attended to and 0 marks positions
                to exclude (e.g. a lower-triangular causal mask).

        Returns:
            (output, attention_weights): output is
            (batch, seq_len_q, d_model); attention_weights is
            (batch, num_heads, seq_len_q, seq_len_k).
        """
        batch_size = q.shape[0]

        # Linear projections.
        q = self.wq(q)  # (batch_size, seq_len, d_model)
        k = self.wk(k)
        v = self.wv(v)

        # Split into heads: (batch_size, num_heads, seq_len, depth).
        q = self.split_heads(q, batch_size)
        k = self.split_heads(k, batch_size)
        v = self.split_heads(v, batch_size)

        # Scaled dot-product attention scores:
        # (batch_size, num_heads, seq_len_q, seq_len_k).
        matmul_qk = torch.matmul(q, k.transpose(-1, -2))
        scaled_attention_logits = matmul_qk / math.sqrt(self.depth)

        # BUG FIX: the original code did `logits += mask * -1e9`, which
        # suppresses the positions where mask == 1.  The caller passes a
        # lower-triangular mask with 1 on the *allowed* (past) positions,
        # so the old code masked out the past and let every token attend
        # only to the future.  Mask the mask == 0 positions instead.
        if mask is not None:
            scaled_attention_logits = scaled_attention_logits.masked_fill(
                mask == 0, -1e9
            )

        # Softmax over the key dimension yields the attention weights.
        attention_weights = torch.softmax(scaled_attention_logits, dim=-1)

        # Weighted sum of values: (batch_size, num_heads, seq_len_q, depth).
        output = torch.matmul(attention_weights, v)

        # Merge the heads back to (batch_size, seq_len_q, d_model).
        output = output.permute(0, 2, 1, 3).contiguous()
        output = output.view(batch_size, -1, self.d_model)

        return self.dense(output), attention_weights

# ----------------------------
# 2. 简易 GPT 模型（仅1层）
# ----------------------------
class SimpleGPT(nn.Module):
    """Minimal one-block GPT: embeddings + one attention layer + FFN."""

    def __init__(self, vocab_size, d_model, num_heads, max_seq_len=32):
        super().__init__()
        self.d_model = d_model
        self.max_seq_len = max_seq_len
        # Token and learned absolute-position embeddings.
        self.token_emb = nn.Embedding(vocab_size, d_model)
        self.pos_emb = nn.Embedding(max_seq_len, d_model)
        # One transformer block: attention + feed-forward, each with a
        # residual connection followed by LayerNorm (post-norm layout).
        self.attention = MultiHeadAttention(d_model, num_heads)
        self.ln1 = nn.LayerNorm(d_model)
        self.ffn = nn.Sequential(
            nn.Linear(d_model, d_model * 4),
            nn.ReLU(),
            nn.Linear(d_model * 4, d_model),
        )
        self.ln2 = nn.LayerNorm(d_model)
        # Projection from hidden states to per-token vocabulary logits.
        self.output_head = nn.Linear(d_model, vocab_size)

    def forward(self, x):
        """Map token ids (B, L) to logits (B, L, vocab_size)."""
        batch_size, seq_len = x.shape
        assert seq_len <= self.max_seq_len, "序列太长！"

        # Sum of token embedding and position embedding: (B, L, D).
        positions = torch.arange(seq_len, device=x.device).unsqueeze(0)
        hidden = self.token_emb(x) + self.pos_emb(positions)

        # Lower-triangular mask (1 on and below the diagonal), shaped
        # (1, 1, L, L) so it broadcasts over batch and heads.
        # NOTE(review): confirm the convention MultiHeadAttention expects
        # for this mask (1 = keep vs. 1 = mask out) matches this layout.
        causal = torch.tril(torch.ones(seq_len, seq_len, device=x.device))
        causal = causal.view(1, 1, seq_len, seq_len)

        # Self-attention sub-layer (q = k = v), residual + LayerNorm.
        attn_out, _ = self.attention(hidden, hidden, hidden, mask=causal)
        hidden = self.ln1(hidden + attn_out)

        # Feed-forward sub-layer, residual + LayerNorm.
        hidden = self.ln2(hidden + self.ffn(hidden))

        # Per-position vocabulary logits: (B, L, vocab_size).
        return self.output_head(hidden)

# ----------------------------
# 3. 构造简单数据集
# ----------------------------
class ToyDataset(Dataset):
    """Wraps a list of token-id lists as a map-style torch Dataset."""

    def __init__(self, sequences):
        # Each element is a list of integer token ids.
        self.sequences = sequences

    def __len__(self):
        return len(self.sequences)

    def __getitem__(self, idx):
        # Materialize one sample as a LongTensor.
        return torch.tensor(self.sequences[idx], dtype=torch.long)

# A tiny synthetic "language": the repeating pattern "A B C A B C ...".
vocab = ["<PAD>", "A", "B", "C"]  # index 0 is reserved for padding (unused here)
token_to_id = {token: i for i, token in enumerate(vocab)}
vocab_size = len(vocab)

# 100 identical samples of length 6: [1, 2, 3, 1, 2, 3] (A=1, B=2, C=3).
sequences = [[1, 2, 3, 1, 2, 3] for _ in range(100)]

dataset = ToyDataset(sequences)
data_loader = DataLoader(dataset, batch_size=8, shuffle=True)

# ----------------------------
# 4. 你的训练函数（稍作完善）
# ----------------------------
def train_language_model(model, data_loader, optimizer, epochs):
    """Autoregressive LM pretraining loop (next-token prediction).

    Each batch of shape (B, L) is split into inputs (tokens 0..L-2) and
    targets (tokens 1..L-1); cross-entropy is averaged over all
    positions.  Prints the average batch loss once per epoch.
    """
    model.train()
    criterion = nn.CrossEntropyLoss()

    for epoch in range(epochs):
        running_loss = 0.0
        for batch in data_loader:
            # Teacher forcing: predict token t+1 from tokens <= t.
            inputs, targets = batch[:, :-1], batch[:, 1:]

            logits = model(inputs)  # (B, L-1, vocab_size)

            # Flatten positions so CrossEntropyLoss sees (N, C) vs (N,).
            loss = criterion(
                logits.reshape(-1, logits.shape[-1]),
                targets.reshape(-1),
            )

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        print(f"Epoch {epoch+1}/{epochs}, Loss: {running_loss / len(data_loader):.4f}")

# ----------------------------
# 5. 启动训练
# ----------------------------
if __name__ == "__main__":
    # Hyperparameters for the toy model.
    vocab_size = 4
    d_model = 64
    num_heads = 2

    # Build the model and its optimizer.
    small_gpt = SimpleGPT(vocab_size, d_model, num_heads, max_seq_len=10)
    # Slightly large learning rate so the loss drop is easy to observe.
    optimizer = torch.optim.Adam(small_gpt.parameters(), lr=5e-4)

    # Run pretraining.
    print("开始训练...")
    train_language_model(small_gpt, data_loader, optimizer, epochs=20)

    # Quick qualitative check: greedy next-token predictions.
    print("\n训练完成！尝试预测...")
    small_gpt.eval()
    with torch.no_grad():
        test_input = torch.tensor([[1, 2, 3, 1, 2]])  # the prefix "A B C A B"
        preds = small_gpt(test_input).argmax(dim=-1)
        print("输入:", test_input.tolist())
        print("预测:", preds.tolist())  # should be close to [2, 3, 1, 2, 3]