import torch
import torch.nn as nn
import torch.nn.functional as F


class ConvTokenEmbedder(nn.Module):
    """ELMo-style token embedder: word embedding + character-CNN features.

    Each token is represented by its word embedding concatenated with a
    character-level feature vector (multi-width Conv1d over character
    embeddings, max-pooled over time, passed through two highway-style
    layers), projected down to ``output_dim``.
    """

    def __init__(self, vocab_size, char_vocab_size, word_emb_dim=100, char_emb_dim=50, output_dim=512):
        super(ConvTokenEmbedder, self).__init__()

        # Word embedding: maps each word ID to a word_emb_dim vector.
        # NOTE(review): padding_idx=3 assumes id 3 is the word-level <pad>
        # token — confirm against the vocabulary used by the caller.
        self.word_emb_layer = nn.Embedding(vocab_size, word_emb_dim, padding_idx=3)

        # Character embedding: maps each char ID to a char_emb_dim vector;
        # the last character id is treated as padding.
        self.char_emb_layer = nn.Embedding(char_vocab_size, char_emb_dim, padding_idx=char_vocab_size - 1)

        # 1-D convolutions with increasing kernel sizes capture character
        # n-grams of different lengths (requires char_len >= 7 at runtime,
        # since the widest kernel is 7).
        self.convolutions = nn.ModuleList([
            nn.Conv1d(char_emb_dim, 32, kernel_size=1),
            nn.Conv1d(char_emb_dim, 32, kernel_size=2),
            nn.Conv1d(char_emb_dim, 64, kernel_size=3),
            nn.Conv1d(char_emb_dim, 128, kernel_size=4),
            nn.Conv1d(char_emb_dim, 256, kernel_size=5),
            nn.Conv1d(char_emb_dim, 512, kernel_size=6),
            nn.Conv1d(char_emb_dim, 1024, kernel_size=7)
        ])

        # Total channel count after max-pooling over all convolutions.
        n_filters = 32 + 32 + 64 + 128 + 256 + 512 + 1024  # = 2048

        # Highway-style transform layers over the pooled char features.
        # BUG FIX: originally Linear(2048, 4096), which made the second
        # layer (expecting 2048 inputs) and the 2148-wide projection fail
        # with shape mismatches at runtime; these must preserve the width.
        self.highways = nn.ModuleList([
            nn.Linear(n_filters, n_filters),
            nn.Linear(n_filters, n_filters)
        ])

        # Projects [word embedding ; char features] down to output_dim
        # (generalizes the original hard-coded 2148 = 100 + 2048).
        self.projection = nn.Linear(word_emb_dim + n_filters, output_dim)

    def forward(self, word_ids, char_ids):
        """Embed a batch of tokens.

        Args:
            word_ids: LongTensor of shape (batch_size, seq_len).
            char_ids: LongTensor of shape (batch_size, seq_len, char_len),
                with char_len >= 7 (widest conv kernel).

        Returns:
            FloatTensor of shape (batch_size, seq_len, output_dim).
        """
        # (batch_size, seq_len, word_emb_dim)
        word_emb = self.word_emb_layer(word_ids)
        # (batch_size, seq_len, char_len, char_emb_dim)
        char_emb = self.char_emb_layer(char_ids)

        batch_size, seq_len, char_len, char_emb_dim = char_emb.shape
        # Conv1d expects (N, channels, length). BUG FIX: the original
        # .view(b*s, emb_dim, char_len) merely reinterpreted memory and
        # scrambled the char/embedding axes; flatten first, then transpose.
        char_emb = char_emb.view(batch_size * seq_len, char_len, char_emb_dim).transpose(1, 2)

        # Convolve + ReLU, then max-pool each feature map over positions.
        conv_outputs = [F.relu(conv(char_emb)) for conv in self.convolutions]
        pooled = [torch.max(co, dim=2)[0] for co in conv_outputs]
        # (batch_size * seq_len, 2048)
        char_features = torch.cat(pooled, dim=1)

        # Highway-style nonlinear transforms (width-preserving).
        for highway in self.highways:
            char_features = F.relu(highway(char_features))

        # (batch_size, seq_len, word_emb_dim + 2048)
        combined = torch.cat([word_emb, char_features.view(batch_size, seq_len, -1)], dim=-1)
        # Final projection to the shared embedding width.
        return self.projection(combined)


class LstmCellWithProjection(nn.Module):
    """A single LSTM cell whose hidden state is projected down to ``proj_dim``.

    This is the "LSTMP" formulation used by ELMo: the recurrent state fed
    back into the gates is the projected hidden state, not the raw one, so
    the returned pair is (projected hidden state, raw cell state).
    """

    def __init__(self, input_dim, hidden_dim, proj_dim):
        super(LstmCellWithProjection, self).__init__()
        # Gate pre-activations from the current input (no bias here;
        # the bias lives in the recurrent transform below).
        self.input_linearity = nn.Linear(input_dim, hidden_dim * 4, bias=False)
        # Gate pre-activations from the previous projected hidden state.
        self.state_linearity = nn.Linear(proj_dim, hidden_dim * 4, bias=True)
        # Projection of the raw hidden state down to proj_dim.
        self.state_projection = nn.Linear(hidden_dim, proj_dim, bias=False)

    def forward(self, x, hidden_state):
        # Unpack the previous (projected) hidden state and cell state.
        prev_h, prev_c = hidden_state
        # One fused affine transform yields all four gate pre-activations.
        preact = self.input_linearity(x) + self.state_linearity(prev_h)
        in_gate, forget_gate, cell_cand, out_gate = preact.chunk(4, dim=-1)
        in_gate = torch.sigmoid(in_gate)
        forget_gate = torch.sigmoid(forget_gate)
        cell_cand = torch.tanh(cell_cand)
        out_gate = torch.sigmoid(out_gate)
        # Standard LSTM state update, followed by the output projection.
        new_c = forget_gate * prev_c + in_gate * cell_cand
        new_h = self.state_projection(out_gate * torch.tanh(new_c))
        return new_h, new_c


class ElmobiLm(nn.Module):
    """Two-layer bidirectional LSTM-with-projection encoder (ELMo biLM).

    Runs a forward and a backward stack of ``LstmCellWithProjection`` cells
    over the input sequence and returns the concatenated top-layer outputs,
    shape (batch_size, seq_len, 2 * proj_dim).
    """

    def __init__(self, input_dim=512, hidden_dim=4096, proj_dim=512):
        super(ElmobiLm, self).__init__()
        # Kept so forward() can size the initial states correctly.
        # BUG FIX: forward() previously hardcoded 512/4096, breaking any
        # non-default construction.
        self.hidden_dim = hidden_dim
        self.proj_dim = proj_dim

        # Layer 0 consumes the token embeddings.
        self.forward_layer_0 = LstmCellWithProjection(input_dim, hidden_dim, proj_dim)
        self.backward_layer_0 = LstmCellWithProjection(input_dim, hidden_dim, proj_dim)
        # Layer 1 consumes layer 0's projected outputs (proj_dim wide).
        # BUG FIX: these cells were constructed but never used in forward();
        # they are now wired in as the second layer. (With the defaults,
        # input_dim == proj_dim, so parameter shapes are unchanged.)
        self.forward_layer_1 = LstmCellWithProjection(proj_dim, hidden_dim, proj_dim)
        self.backward_layer_1 = LstmCellWithProjection(proj_dim, hidden_dim, proj_dim)

    def forward(self, x):
        """Encode ``x`` of shape (batch_size, seq_len, input_dim)."""
        fwd = self._run_direction(x, [self.forward_layer_0, self.forward_layer_1], reverse=False)
        bwd = self._run_direction(x, [self.backward_layer_0, self.backward_layer_1], reverse=True)
        # (batch_size, seq_len, 2 * proj_dim)
        return torch.cat([fwd, bwd], dim=-1)

    def _run_direction(self, x, cells, reverse):
        """Run a stack of LSTMP cells over time in one direction.

        Returns the top layer's per-step outputs in original time order,
        shape (batch_size, seq_len, proj_dim).
        """
        batch_size, seq_len, _ = x.size()
        # Iterate time forwards or backwards; outputs are stored by their
        # original time index so no reversal is needed afterwards.
        steps = range(seq_len - 1, -1, -1) if reverse else range(seq_len)
        inputs = x
        for cell in cells:
            # BUG FIX: use new_zeros so the states match x's device/dtype
            # and the configured dims instead of hardcoded CPU 512/4096.
            h = x.new_zeros(batch_size, self.proj_dim)
            c = x.new_zeros(batch_size, self.hidden_dim)
            outputs = [None] * seq_len
            for t in steps:
                h, c = cell(inputs[:, t, :], (h, c))
                outputs[t] = h
            # The next layer consumes this layer's projected outputs.
            inputs = torch.stack(outputs, dim=1)
        return inputs


class Model(nn.Module):
    """End-to-end ELMo-style encoder.

    Composes the word/character token embedder with the bidirectional
    LSTM encoder.
    """

    def __init__(self, vocab_size, char_vocab_size):
        super(Model, self).__init__()
        # Token-level representations from word + character embeddings.
        self.token_embedder = ConvTokenEmbedder(vocab_size, char_vocab_size)
        # Bidirectional LSTM encoder over the token representations.
        self.encoder = ElmobiLm()

    def forward(self, word_ids, char_ids):
        # Embed tokens, then contextualize them with the ELMo encoder.
        token_reprs = self.token_embedder(word_ids, char_ids)
        return self.encoder(token_reprs)


def main():
    """Smoke-test the model: build it, run a tiny random batch, and print
    the intermediate tensor shapes of both sub-modules."""
    # Fix the RNG seed so the run is reproducible.
    torch.manual_seed(42)

    # 1. Model hyperparameters.
    vocab_size = 1000  # word vocabulary size
    char_vocab_size = 50  # character vocabulary size
    # BUG FIX: word_emb_dim / char_emb_dim / output_dim were assigned here
    # but never passed to the model — removed as dead, misleading config;
    # Model uses ConvTokenEmbedder's defaults.

    # 2. Build the model.
    model = Model(vocab_size, char_vocab_size)

    # 3. Example inputs.
    batch_size = 2
    seq_len = 5  # sequence length (number of words)
    char_len = 7  # max characters per word

    # Randomly generated word IDs and character IDs.
    word_ids = torch.randint(0, vocab_size, (batch_size, seq_len))
    char_ids = torch.randint(0, char_vocab_size, (batch_size, seq_len, char_len))

    print("输入的单词ID:\n", word_ids)
    print("输入的字符ID:\n", char_ids)

    # 4. Token embeddings via ConvTokenEmbedder.
    embeddings = model.token_embedder(word_ids, char_ids)
    print("\nConvTokenEmbedder 后的嵌入向量形状:", embeddings.shape)  # (batch_size, seq_len, output_dim)

    # 5. Manually replay ConvTokenEmbedder's steps (optional, for inspection).
    print("\n--- ConvTokenEmbedder 内部的详细步骤 ---")
    word_emb = model.token_embedder.word_emb_layer(word_ids)
    char_emb = model.token_embedder.char_emb_layer(char_ids)
    print("词嵌入的形状:", word_emb.shape)  # (batch_size, seq_len, word_emb_dim)
    print("字符嵌入的形状:", char_emb.shape)  # (batch_size, seq_len, char_len, char_emb_dim)

    b, s, c, d = char_emb.shape
    # BUG FIX: reshape for Conv1d via flatten + transpose; the previous
    # view(b * s, d, c) reinterpreted memory and scrambled the axes.
    char_emb_reshaped = char_emb.view(b * s, c, d).transpose(1, 2)
    print("调整后的字符嵌入形状:", char_emb_reshaped.shape)

    conv_outputs = [torch.relu(conv(char_emb_reshaped)) for conv in model.token_embedder.convolutions]
    print("卷积输出的数量:", len(conv_outputs))
    print("一个卷积输出的形状:", conv_outputs[0].shape)

    pooled_outputs = [torch.max(co, dim=2)[0] for co in conv_outputs]
    print("池化输出的数量:", len(pooled_outputs))
    print("一个池化输出的形状:", pooled_outputs[0].shape)

    char_features = torch.cat(pooled_outputs, dim=1)
    print("字符特征的形状:", char_features.shape)

    for i, highway in enumerate(model.token_embedder.highways):
        char_features = torch.relu(highway(char_features))
        print(f"经过第 {i + 1} 个 Highway 层后的字符特征形状:", char_features.shape)

    char_features = char_features.view(batch_size, seq_len, -1)
    print("调整后的字符特征形状:", char_features.shape)

    combined = torch.cat([word_emb, char_features], dim=-1)
    print("合并后的词和字符特征形状:", combined.shape)

    projected = model.token_embedder.projection(combined)
    print("投影后的嵌入向量形状:", projected.shape)
    print("--- ConvTokenEmbedder 详细步骤结束 ---\n")

    # 6. Full ELMo representation via ElmobiLm.
    elmo_output = model.encoder(embeddings)
    print("ELMo 输出的形状:", elmo_output.shape)  # (batch_size, seq_len, 2 * proj_dim)

    # 7. Manually replay the first ElmobiLm layer (optional).
    print("\n--- ElmobiLm 内部的详细步骤 ---")
    # Initial hidden/cell states (sized for the encoder's defaults).
    h_forward, c_forward = torch.zeros(batch_size, 512), torch.zeros(batch_size, 4096)
    h_backward, c_backward = torch.zeros(batch_size, 512), torch.zeros(batch_size, 4096)

    forward_outputs = []
    for t in range(seq_len):
        h_forward, c_forward = model.encoder.forward_layer_0(embeddings[:, t, :], (h_forward, c_forward))
        print(f"前向传播，时间步 {t}: h_forward 形状: {h_forward.shape}, c_forward 形状: {c_forward.shape}")
        forward_outputs.append(h_forward)
    forward_outputs = torch.stack(forward_outputs, dim=1)
    print("前向传播输出形状", forward_outputs.shape)

    backward_outputs = []
    for t in reversed(range(seq_len)):
        h_backward, c_backward = model.encoder.backward_layer_0(embeddings[:, t, :], (h_backward, c_backward))
        print(f"反向传播，时间步 {t}: h_backward 形状: {h_backward.shape}, c_backward 形状: {c_backward.shape}")
        # Prepend so outputs end up in original time order.
        backward_outputs.insert(0, h_backward)
    backward_outputs = torch.stack(backward_outputs, dim=1)
    print("反向传播输出形状", backward_outputs.shape)

    print("--- ElmobiLm 详细步骤结束 ---")


# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
