import itertools

import torch

# Small demo tensors (referenced by the einsum experiments further down).
a = torch.tensor([[1, 2], [3, 4]])
b = torch.tensor([[2, 1], [4, 5]])

v = torch.randn(3, 1, 1)
# BUG FIX: the label says 'shape' but the whole tensor was printed;
# print the shape itself, matching the 'avg' line which prints the value.
print('shape: ', v.shape)
print('avg: ', v.mean())
#-----------------------------------------------
batch_size = 2
K_SIZE = 3
N_ROBOTS = 5
next_logps = torch.randn(N_ROBOTS, batch_size, K_SIZE)

# Reference (loop) computation of the joint log-probability over all
# K_SIZE**N_ROBOTS joint actions.  Joint action (a_0, ..., a_{N-1}) maps to
# flat index sum_r a_r * K_SIZE**r, i.e. robot 0's action varies fastest.
# Generalized from five hard-coded nested loops to any N_ROBOTS while
# preserving the exact same index convention and values.
next_logp = torch.zeros(batch_size, K_SIZE**N_ROBOTS, 1)
for digits in itertools.product(range(K_SIZE), repeat=N_ROBOTS):
    # digits[r] is robot r's action index; robot r contributes weight K**r.
    flat = sum(d * K_SIZE**r for r, d in enumerate(digits))
    next_logp[:, flat, 0] = sum(next_logps[r, :, d] for r, d in enumerate(digits))


# Broadcast version: give robot r's log-probs their own axis (axis r+1) so a
# single addition materializes every joint combination.
axes = []
for r in range(N_ROBOTS):
    view = [batch_size] + [1] * N_ROBOTS
    view[r + 1] = K_SIZE
    axes.append(next_logps[r].reshape(view))  # K_SIZE lives on axis r+1 only

result = axes[0]
for t in axes[1:]:
    result = result + t  # (batch_size, K, K, K, K, K)

# Reverse the action axes so robot 0 varies fastest after flattening,
# matching the loop's flat-index convention (index = sum_r a_r * K**r).
next_logp_b = result.permute(0, 5, 4, 3, 2, 1).reshape(batch_size, K_SIZE**N_ROBOTS, -1)  # (batch_size, K_SIZE**N_ROBOTS, 1)
assert torch.allclose(next_logp, next_logp_b)

batch_size = 2
K_SIZE = 3
tensor = [torch.randn(batch_size, K_SIZE) for _ in range(2)]

# Reference computation for the two-robot case: joint action (first, second)
# maps to flat index first + second * K_SIZE (first varies fastest).
logps = torch.randn(2, batch_size, K_SIZE)
logp = torch.zeros(batch_size, K_SIZE**2, 1)
for second in range(K_SIZE):
    for first in range(K_SIZE):
        logp[:, first + second * K_SIZE, 0] = logps[0, :, first] + logps[1, :, second]

# Broadcast version of the two-robot sum.
tensor_0 = logps[0][:, :, None]  # (batch_size, K_SIZE, 1)
tensor_1 = logps[1][:, None, :]  # (batch_size, 1, K_SIZE)

# result[b, i, j] = logps[0][b, i] + logps[1][b, j]
result = tensor_0 + tensor_1  # (batch_size, K_SIZE, K_SIZE)

# Swap the two action axes so i (robot 0) varies fastest when flattened,
# matching the loop's index convention i + j*K_SIZE.
logp_broadcast = result.permute(0, 2, 1).reshape(batch_size, K_SIZE**2, 1)  # (batch_size, K_SIZE**2, 1)
print('logp_broadcast: ', logp_broadcast.shape)
print('logp: ', logp.shape)
assert torch.allclose(logp, logp_broadcast)

# Three-robot check, this time leaving the result in indexed (i, j, k) form
# instead of flattening it.
logps = torch.randn(3, batch_size, K_SIZE)
logp = torch.zeros(batch_size, K_SIZE, K_SIZE, K_SIZE)
for idx0 in range(K_SIZE):
    for idx1 in range(K_SIZE):
        for idx2 in range(K_SIZE):
            logp[:, idx0, idx1, idx2] = (
                logps[0, :, idx0] + logps[1, :, idx1] + logps[2, :, idx2]
            )

# Same sum via broadcasting: each robot's log-probs get their own axis.
tensor_0 = logps[0][:, :, None, None]  # (batch_size, K_SIZE, 1, 1)
tensor_1 = logps[1][:, None, :, None]  # (batch_size, 1, K_SIZE, 1)
tensor_2 = logps[2][:, None, None, :]  # (batch_size, 1, 1, K_SIZE)

result = tensor_0 + tensor_1 + tensor_2  # (batch_size, K_SIZE, K_SIZE, K_SIZE)
assert torch.allclose(logp, result)

# Pairwise-sum table built two ways, plus an einsum for comparison.
# NOTE: torch.einsum('bi,bj->bij', ...) is the outer *product*
# (multiplication), not the sum, which is why its comparison stays disabled.
result_loop = torch.zeros((batch_size, K_SIZE, K_SIZE))
for row in range(K_SIZE):
    for col in range(K_SIZE):
        result_loop[:, row, col] = tensor[0][:, row] + tensor[1][:, col]

result_einsum = torch.einsum('bi,bj->bij', tensor[0], tensor[1])
result_broadcast = tensor[0][:, :, None] + tensor[1][:, None, :]

# einsum builds a product table, so it does not match the additive one:
# assert torch.allclose(result_loop, result_einsum)
assert torch.allclose(result_loop, result_broadcast)

# Joint mask via element-wise product:
#   new_mask[b, i + j*K_SIZE, 0] = first_half[b, i] * second_half[b, j].
batch_size = 2  # restated (same values as above) so this section stands alone
K_SIZE = 3
current_mask = torch.randn(batch_size, K_SIZE*2, 1)

new_current_mask = torch.zeros(batch_size, K_SIZE**2, 1)
for i in range(K_SIZE):
    for j in range(K_SIZE):
        new_current_mask[:, i*1 + j*K_SIZE, :] = current_mask[:, i, :] * current_mask[:, j + K_SIZE, :]

cmake1, cmake2 = current_mask[:, :K_SIZE, :], current_mask[:, K_SIZE:, :]
# Broadcast product: (batch, K, 1, 1) * (batch, 1, K, 1) -> (batch, K, K, 1),
# entry [b, i, j, 0] = cmake1[b, i] * cmake2[b, j].
new_current_mask_2 = cmake1[:, :, None] * cmake2[:, None, :]
# BUG FIX: the two action axes must be swapped before flattening.  Without the
# permute the flat index is i*K_SIZE + j, but the loop above uses i + j*K_SIZE
# (i varies fastest), so the tensors disagreed off the diagonal and the assert
# below failed.  This mirrors the permute(0, 2, 1) used in the verified
# two-robot log-prob version earlier in the file.
new_current_mask_2 = new_current_mask_2.permute(0, 2, 1, 3).reshape(batch_size, -1, 1)  # (batch_size, K_SIZE**2, 1)

diff = torch.abs(new_current_mask - new_current_mask_2)
print("Max difference:", diff.max())
assert torch.allclose(new_current_mask, new_current_mask_2)

