import torch


# 练习1: 基本矩阵乘法
# Exercise 1: basic matrix multiplication
def basic_matrix_multiplication():
    """Multiply two 2D tensors with the ``@`` operator and return the product.

    Returns:
        torch.Tensor: the (3, 2) product ``A @ B``.
    """
    A = torch.randn(3, 4)  # 3x4 matrix
    B = torch.randn(4, 2)  # 4x2 matrix
    C = A @ B  # equivalent to torch.matmul(A, B)
    print(f"A @ B shape: {C.shape}")  # prints: torch.Size([3, 2])
    # Return the result so callers/tests can inspect it (previously discarded).
    return C


# 练习2: 批量矩阵乘法
# Exercise 2: batched matrix multiplication
def batch_matrix_multiplication():
    """Multiply two batched 3D tensors; ``@`` multiplies each batch pair.

    Returns:
        torch.Tensor: the (batch_size, 3, 2) batched product ``A @ B``.
    """
    batch_size = 2
    A = torch.randn(batch_size, 3, 4)  # batch of 3x4 matrices
    B = torch.randn(batch_size, 4, 2)  # batch of 4x2 matrices
    C = A @ B  # one matmul per batch element
    print(f"Batch A @ B shape: {C.shape}")  # prints: torch.Size([2, 3, 2])
    # Return the result so callers/tests can inspect it (previously discarded).
    return C


# 练习3: 广播矩阵乘法
# Exercise 3: broadcasting in matrix multiplication
def broadcast_matrix_multiplication():
    """Multiply a single 2D matrix against a batched 3D tensor.

    ``torch.matmul`` broadcasts the 2D operand across the batch dimension.

    Returns:
        torch.Tensor: the (2, 3, 2) broadcast product ``A @ B``.
    """
    A = torch.randn(3, 4)  # single 3x4 matrix
    B = torch.randn(2, 4, 2)  # batch of two 4x2 matrices
    C = A @ B  # A is broadcast to B's batch size
    print(f"Broadcast A @ B shape: {C.shape}")  # prints: torch.Size([2, 3, 2])
    # Return the result so callers/tests can inspect it (previously discarded).
    return C


# 练习4: 多头注意力中的矩阵运算模拟
# Exercise 4: matrix operations as used in multi-head attention
def multihead_attention_simulation():
    """Simulate the attention-weighted sum step of multi-head attention.

    NOTE: the "weights" here are raw random values, not softmax-normalized —
    this only exercises the 4D matmul shape semantics, not real attention.

    Returns:
        torch.Tensor: output of shape (batch, heads, seq_length, d_k).
    """
    batch_size = 4
    num_heads = 8
    seq_length = 10
    d_k = 64

    # Simulated attention weights and value matrices.
    attn_weights = torch.randn(batch_size, num_heads, seq_length, seq_length)
    values = torch.randn(batch_size, num_heads, seq_length, d_k)

    # Attention-weighted sum: matmul over the last two dims, batched over the rest.
    output = attn_weights @ values
    print(f"Attention output shape: {output.shape}")  # prints: torch.Size([4, 8, 10, 64])
    # Return the result so callers/tests can inspect it (previously discarded).
    return output


# 练习5: 与传统torch.matmul的对比
# Exercise 5: comparison with the classic torch.matmul / torch.mm calls
def compare_matmul_methods():
    """Show that ``@``, ``torch.matmul`` and ``torch.mm`` agree on 2D inputs.

    Returns:
        bool: True if all three products are element-wise close.
    """
    A = torch.randn(5, 6)
    B = torch.randn(6, 3)

    # Three equivalent spellings (torch.mm is restricted to 2D tensors).
    result1 = A @ B
    result2 = torch.matmul(A, B)
    result3 = torch.mm(A, B)

    all_equal = torch.allclose(result1, result2) and torch.allclose(result2, result3)
    print(f"All results equal: {all_equal}")
    # Return the verdict so callers/tests can check it (previously discarded).
    return all_equal


if __name__ == "__main__":
    # Run every exercise in order; each entry pairs a section title with its demo.
    _exercises = (
        ("练习1: 基本矩阵乘法", basic_matrix_multiplication),
        ("练习2: 批量矩阵乘法", batch_matrix_multiplication),
        ("练习3: 广播矩阵乘法", broadcast_matrix_multiplication),
        ("练习4: 多头注意力模拟", multihead_attention_simulation),
        ("练习5: 不同矩阵乘法方法对比", compare_matmul_methods),
    )
    for index, (title, run_demo) in enumerate(_exercises):
        # Every section after the first is preceded by a blank line.
        print(("\n" if index else "") + title)
        run_demo()
