import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch import Tensor
from typing import Optional, Tuple

class TensorParallelMultiHeadAttention(nn.Module):
    """Multi-head self-attention with Megatron-style tensor parallelism.

    Heads are sharded across ``tp_size`` devices: the Q/K/V projections are
    column-parallel (each rank computes only its local heads) and the output
    projection is row-parallel (each rank produces a partial output that is
    summed across ranks with an all-reduce).
    """

    def __init__(self,
                 d_model: int,
                 num_heads: int,
                 tp_size: int = 1,
                 tp_rank: int = 0,
                 dropout: float = 0.1,
                 bias: bool = True):
        """
        Args:
            d_model: model (embedding) dimension of the input.
            num_heads: total number of attention heads across all ranks.
            tp_size: tensor-parallel world size (number of devices).
            tp_rank: rank of the current device in the TP group.
            dropout: dropout probability applied to the attention weights.
            bias: whether the linear projections use bias terms.

        Raises:
            ValueError: if ``d_model`` is not divisible by ``num_heads`` or
                ``num_heads`` is not divisible by ``tp_size``.
        """
        super().__init__()

        # Validate with real exceptions: `assert` is stripped under -O.
        if d_model % num_heads != 0:
            raise ValueError("d_model必须能被num_heads整除")
        if num_heads % tp_size != 0:
            raise ValueError("num_heads必须能被tp_size整除")

        self.d_model = d_model
        self.num_heads = num_heads
        self.tp_size = tp_size
        self.tp_rank = tp_rank
        self.d_k = d_model // num_heads
        self.local_num_heads = num_heads // tp_size  # heads owned by this rank

        # Projections are sharded: Q/K/V produce only the local heads
        # (column-parallel); W_o consumes only the local heads (row-parallel).
        self.W_q = nn.Linear(d_model, self.local_num_heads * self.d_k, bias=bias)
        self.W_k = nn.Linear(d_model, self.local_num_heads * self.d_k, bias=bias)
        self.W_v = nn.Linear(d_model, self.local_num_heads * self.d_k, bias=bias)
        self.W_o = nn.Linear(self.local_num_heads * self.d_k, d_model, bias=bias)

        self.dropout = nn.Dropout(dropout)
        self.scaling = 1.0 / math.sqrt(self.d_k)

    def forward(self,
                x: Tensor,
                mask: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
        """Run self-attention over the local head shard.

        Args:
            x: input tensor of shape (batch_size, seq_len, d_model). Every
                rank receives the full (un-sharded) input.
            mask: optional mask of shape (batch_size, seq_len_q, seq_len_k);
                positions where the mask is 0 are excluded from attention.

        Returns:
            output: attention output, shape (batch_size, seq_len, d_model).
                Fully reduced across ranks when ``tp_size > 1``.
            attention_weights: local attention weights, shape
                (batch_size, local_num_heads, seq_len_q, seq_len_k).
        """
        batch_size = x.size(0)

        # 1. Project and split into the local heads only:
        #    (batch, seq, local_heads * d_k) -> (batch, local_heads, seq, d_k)
        Q = self.W_q(x).view(batch_size, -1, self.local_num_heads, self.d_k).transpose(1, 2)
        K = self.W_k(x).view(batch_size, -1, self.local_num_heads, self.d_k).transpose(1, 2)
        V = self.W_v(x).view(batch_size, -1, self.local_num_heads, self.d_k).transpose(1, 2)

        # 2. Scaled dot-product scores for the local heads
        scores = torch.matmul(Q, K.transpose(-2, -1)) * self.scaling

        # 3. Apply the mask (broadcast over the head dimension)
        if mask is not None:
            scores = scores.masked_fill(mask.unsqueeze(1) == 0, -1e9)

        # 4. Attention weights (softmax over keys), with dropout
        attention_weights = F.softmax(scores, dim=-1)
        attention_weights = self.dropout(attention_weights)

        # 5. Weighted sum of values
        output = torch.matmul(attention_weights, V)

        # 6. Merge the local heads back: (batch, seq, local_heads * d_k)
        output = output.transpose(1, 2).contiguous().view(
            batch_size, -1, self.local_num_heads * self.d_k)

        # 7. Row-parallel output projection: apply only the weight here.
        #    The bias must be added exactly once, AFTER the all-reduce —
        #    adding it per rank before the reduce would sum it tp_size times.
        output = F.linear(output, self.W_o.weight)

        # 8. Sum the partial outputs across ranks
        if self.tp_size > 1:
            torch.distributed.all_reduce(output, op=torch.distributed.ReduceOp.SUM)

        if self.W_o.bias is not None:
            output = output + self.W_o.bias

        return output, attention_weights


def setup_tensor_parallel(tp_size: int, tp_rank: int) -> torch.device:
    """Initialize the tensor-parallel process group and pick a device.

    Args:
        tp_size: tensor-parallel world size (number of processes).
        tp_rank: rank of the current process within the TP group.

    Returns:
        The device this rank should use: ``cuda:<tp_rank>`` when CUDA is
        available, otherwise the CPU.
    """
    if tp_size > 1 and not torch.distributed.is_initialized():
        torch.distributed.init_process_group(
            backend='nccl',
            init_method='env://',
            world_size=tp_size,
            rank=tp_rank)

    # Fall back to CPU when CUDA is unavailable so single-process runs
    # (tp_size == 1) do not crash on CPU-only machines.
    if torch.cuda.is_available():
        device = torch.device(f'cuda:{tp_rank}')
        torch.cuda.set_device(device)
    else:
        device = torch.device('cpu')
    return device


# Example usage (single-process demo of the tensor-parallel layout)
if __name__ == "__main__":
    # Configuration
    d_model = 1024
    num_heads = 16
    batch_size = 4
    seq_len = 512
    tp_size = 1  # single-process demo; increase for real multi-GPU TP

    print(f"current world size: {torch.cuda.device_count()}")

    # Simulate each tensor-parallel rank in turn
    for tp_rank in range(tp_size):
        print(f"\n=== 模拟设备 {tp_rank} ===")

        # Set up the tensor-parallel environment for this rank
        device = setup_tensor_parallel(tp_size, tp_rank)

        # Create the TP MHA module
        tp_mha = TensorParallelMultiHeadAttention(
            d_model, num_heads, tp_size, tp_rank).to(device)

        # Random input. Every rank gets the FULL d_model features: tensor
        # parallelism shards the projection outputs, not the input, so the
        # last dimension must be d_model (not d_model // tp_size).
        x = torch.rand(batch_size, seq_len, d_model).to(device)

        # Optional mask: block attention to the second half of the keys
        mask = torch.ones(batch_size, seq_len, seq_len).to(device)
        mask[:, :, 256:] = 0

        # Forward pass
        output, attn_weights = tp_mha(x, mask)

        print(f"设备 {tp_rank} - 输入query形状:", x.shape)
        print(f"设备 {tp_rank} - 输出形状:", output.shape)
        print(f"设备 {tp_rank} - 注意力权重形状:", attn_weights.shape)