import torch
import torch.nn as nn
import torch.nn.functional as F
import string


def text_to_indices(text, char_to_index):
    """Convert a string to a list of character indices.

    Looks up every character of *text* in the *char_to_index* mapping;
    raises KeyError if a character is missing from the mapping.
    """
    return list(map(char_to_index.__getitem__, text))

def paramsEmbedding(parameters_list, embedding_dim=50, num_filters=16, kernel_size=3):
    """Embed lists of string parameters into fixed-size feature vectors.

    Each inner list of strings is joined with ',' into a single string,
    encoded at the character level, embedded, passed through a 1-D
    convolution and globally max-pooled into one `num_filters`-dim vector.

    NOTE(review): the embedding/conv layers are freshly (randomly)
    initialized on every call, so the features are untrained and differ
    between calls unless the torch RNG is seeded beforehand.

    Args:
        parameters_list: list of lists of strings (one inner list per sample).
        embedding_dim: dimensionality of the character embeddings.
        num_filters: number of conv filters; also the output feature size.
        kernel_size: width of the 1-D convolution window.

    Returns:
        numpy array of shape (len(parameters_list), num_filters).
    """
    # Character vocabulary: all printable ASCII, indexed from 1
    # (index 0 is left unused, implicitly reserved for padding/unknown).
    char_set = list(string.printable)
    char_to_index = {char: idx for idx, char in enumerate(char_set, 1)}

    # Empty input would make max() below raise — return an empty result instead.
    if not parameters_list:
        return torch.empty(0, num_filters).numpy()

    parameters = [','.join(p) for p in parameters_list]

    # Pad every string to a common length with spaces (space is printable,
    # so it maps to a valid index). Keep a floor of 5 (original behavior)
    # and never below kernel_size so the conv window always fits.
    max_length = max(len(p) for p in parameters)
    max_length = max(max_length, 5, kernel_size)
    padded_indices = [[char_to_index[ch] for ch in p.ljust(max_length)]
                      for p in parameters]

    # Shape (num_params, max_length), then add a batch dimension in front.
    char_indices = torch.tensor(padded_indices, dtype=torch.long).unsqueeze(0)

    # Freshly initialized layers (see NOTE in the docstring).
    embedding_layer = nn.Embedding(num_embeddings=len(char_set) + 1, embedding_dim=embedding_dim)
    conv_layer = nn.Conv1d(in_channels=embedding_dim, out_channels=num_filters, kernel_size=kernel_size)
    pooling_layer = nn.AdaptiveMaxPool1d(1)

    # Inference only — the original detached the result anyway, so skip
    # building the autograd graph entirely.
    with torch.no_grad():
        # (batch, num_params, max_length, embedding_dim)
        char_embeddings = embedding_layer(char_indices)

        # Fold params into the batch axis and move channels first for Conv1d:
        # (batch * num_params, embedding_dim, max_length)
        char_embeddings = char_embeddings.view(-1, max_length, embedding_dim).transpose(1, 2)

        # Convolution + global max pool over the length axis.
        conv_output = F.relu(conv_layer(char_embeddings))          # (B*P, num_filters, L')
        pooled_output = pooling_layer(conv_output).squeeze(-1)     # (B*P, num_filters)

        # Restore (batch, num_params, num_filters).
        pooled_output = pooled_output.view(char_indices.size(0), char_indices.size(1), -1)

    # Drop the singleton batch dimension; no grad was tracked, so no detach needed.
    return pooled_output[0].numpy()

# if __name__ == '__main__':
#     paramsList = [['abc', 'abcd'], ['abc123', 'abc1234', 'abc12345']]
#     paramsEmbedding(paramsList)