import torch
import torch.nn.functional as F

# # 1. Define inputs (randomly initialized word embeddings)
# embed_size = 4
# eng_words = ["The", "cat", "sat", "on", "the", "mat"]
# fr_words = ["Le", "chat", "s'est", "assis", "sur", "le", "tapis"]

# eng_embeddings = torch.randn(len(eng_words), embed_size)  # Key & Value
# fr_embeddings = torch.randn(len(fr_words), embed_size)    # Query

# # 2. Compute the attention weights
# scores = torch.matmul(fr_embeddings, eng_embeddings.T)  # [7, 6]
# attn_weights = F.softmax(scores, dim=-1)  # [7, 6]

# # 3. Weighted sum over the values
# attn_output = torch.matmul(attn_weights, eng_embeddings)  # [7, 4]

# # Print the results
# print("注意力权重矩阵示例:")
# print(attn_weights)
# print("\n'chat' 对 'cat' 的注意力分数:", attn_weights[1, 1].item())
# print("\n注意力输出:", attn_output.shape)




# Split a comma-separated string of employee IDs into fixed-size batches.
employee_ids_str = "350426198608247039,533022199407150322,362329199411197412,51352219801005311X,522127198009036532"
employee_ids_list = employee_ids_str.split(',')
print("Employee IDs as list:", employee_ids_list)

# Maximum number of IDs per batch.
max_size = 2

# The slicing comprehension already yields a single chunk when the whole
# list fits in one batch (range(0, n, max_size) is just [0] for n <= max_size),
# so the previous `if len(...) > max_size` branch was redundant and is removed.
# Note: str.split(',') never returns an empty list, so chunks_list is never [].
chunks_list = [
    employee_ids_list[i:i + max_size]
    for i in range(0, len(employee_ids_list), max_size)
]
print(chunks_list)