import torch
from torch import nn
from tokenizer import ChineseCharacterTokenizer

# Fixed sequence length (in tokens) assumed by get_mask_by_seq_lens and by
# the Classifier's linear layer below.
SEQ_LENGTH = 25


class SelfAttention(nn.Module):
    """Multi-head self-attention sub-layer with an internal residual connection.

    The QKV and output projections are 1x1 ``nn.Conv1d`` layers, which operate
    channel-first, so tensors are transposed to ``[batch, channels, seq]``
    around each projection.
    """

    def __init__(self, embed_dim=252, num_heads=12, dropout=0.1):
        super(SelfAttention, self).__init__()

        # Multi-Head Self Attention; embed_dim must be divisible by num_heads.
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads

        # Fused query/key/value projection and the output projection.
        self.c_attn = nn.Conv1d(embed_dim, embed_dim * 3, kernel_size=1, bias=False)
        self.c_proj = nn.Conv1d(embed_dim, embed_dim, kernel_size=1, bias=False)

        # Dropout on the attention weights and on the projected output.
        self.attn_dropout = nn.Dropout(p=dropout, inplace=False)
        self.resid_dropout = nn.Dropout(p=dropout, inplace=False)

    def forward(self, x, mask=None):
        """Apply self-attention.

        Args:
            x: ``[batch_size, seq_length, embed_dim]`` input.
            mask: optional ``[batch_size, seq_length]`` tensor where 0 marks
                padding positions to be excluded from attention.

        Returns:
            ``x + attention(x)``, shape ``[batch_size, seq_length, embed_dim]``.
        """
        batch_size, seq_length, embed_dim = x.size()

        # Project the input into query, key, and value (conv is channel-first).
        qkv = self.c_attn(x.transpose(1, 2))  # [batch_size, 3 * embed_dim, seq_length]
        q, k, v = qkv.chunk(3, dim=1)  # Split into query, key, and value

        # [B, embed, seq] -> [B, heads, head_dim, seq] -> [B, heads, seq, head_dim]
        q = q.view(batch_size, self.num_heads, self.head_dim, seq_length).transpose(2, 3)
        k = k.view(batch_size, self.num_heads, self.head_dim, seq_length).transpose(2, 3)
        v = v.view(batch_size, self.num_heads, self.head_dim, seq_length).transpose(2, 3)

        # Scaled dot-product scores: [batch_size, num_heads, seq_length, seq_length].
        attn_scores = torch.matmul(q, k.transpose(-2, -1)) / (self.head_dim ** 0.5)
        if mask is not None:
            mask = mask.unsqueeze(1).unsqueeze(2)  # [batch_size, 1, 1, seq_length]
            mask = mask.expand_as(attn_scores)  # [batch_size, num_heads, seq_length, seq_length]
            # Positions where mask == 0 get a very large negative score so
            # their softmax weight is effectively zero.
            attn_scores = attn_scores.masked_fill(mask == 0, -1e9)
        attn_probs = nn.functional.softmax(attn_scores, dim=-1)
        attn_probs = self.attn_dropout(attn_probs)

        # Weighted sum of values: [batch_size, num_heads, seq_length, head_dim].
        attn_output = torch.matmul(attn_probs, v)

        # BUGFIX: merge heads back into the channel-first layout
        # [batch_size, embed_dim, seq_length]. The previous code viewed the
        # transposed tensor directly as [batch, seq, embed], which flattened
        # (heads, head_dim, seq) across the wrong axes and scrambled the
        # per-position channels.
        attn_output = attn_output.transpose(2, 3).contiguous().view(
            batch_size, embed_dim, seq_length)

        # Output projection (channel-first), dropout, back to [B, seq, embed].
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)
        attn_output = attn_output.transpose(1, 2)

        # Residual connection. NOTE(review): SelfAttentionBlock adds another
        # residual on top of this one; kept for backward compatibility.
        output = x + attn_output
        return output


class MLP(nn.Module):
    """Position-wise feed-forward sub-layer with an internal residual.

    Expands the channel dimension by 4x, applies ReLU, projects back down,
    applies dropout, and adds the input. Implemented with 1x1 ``nn.Conv1d``
    layers, so tensors are moved to channel-first layout around the two
    projections.
    """

    def __init__(self, embed_dim=252, drop_rate=0.081024):
        super(MLP, self).__init__()

        # Two 1x1 convolutions form the point-wise feed-forward network.
        self.c_fc = nn.Conv1d(embed_dim, embed_dim * 4, kernel_size=1, bias=True)
        self.c_proj = nn.Conv1d(embed_dim * 4, embed_dim, kernel_size=1, bias=True)
        self.act = nn.ReLU()
        # Regularization on the down-projected output.
        self.dropout = nn.Dropout(p=drop_rate, inplace=False)

    def forward(self, x):
        """x: [batch_size, seq_length, embed_dim] -> same shape."""
        # Channel-first for the convolutions.
        hidden = self.act(self.c_fc(x.transpose(1, 2)))  # [batch, 4*embed, seq]
        hidden = self.dropout(self.c_proj(hidden))       # [batch, embed, seq]
        # Back to [batch, seq, embed] and add the residual connection.
        return x + hidden.transpose(1, 2)


class SelfAttentionBlock(nn.Module):
    """Pre-LayerNorm transformer block: LN -> attention, LN -> MLP.

    NOTE(review): SelfAttention and MLP each add their own internal residual,
    and this block adds a second residual on top of each sub-layer, so the
    effective update per sub-layer is ``x + norm(x) + sublayer(norm(x))``.
    Left as-is for backward compatibility with existing checkpoints.
    """

    def __init__(self, embedding_dim=252, head_num=12, dropout=0.081024):
        super(SelfAttentionBlock, self).__init__()
        self.ln_1 = nn.LayerNorm(embedding_dim, eps=1e-05, elementwise_affine=True)
        # BUGFIX: forward this block's dropout rate to the attention layer.
        # Previously it was not passed, so SelfAttention silently used its
        # own default (0.1) regardless of the configured dropout.
        self.attn = SelfAttention(embedding_dim, head_num, dropout)
        self.ln_2 = nn.LayerNorm(embedding_dim, eps=1e-05, elementwise_affine=True)
        self.mlp = MLP(embedding_dim, dropout)

    def forward(self, x, mask=None):
        """x: [batch, seq, embed]; mask: optional [batch, seq] padding mask."""
        # Attention sub-layer with pre-norm and residual.
        attn_output = self.attn(self.ln_1(x), mask)
        x_residual = x + attn_output
        # Feed-forward sub-layer with pre-norm and residual.
        mlp_output = self.mlp(self.ln_2(x_residual))
        return x_residual + mlp_output


def get_mask_by_seq_lens(input_lens, max_len=None):
    """Build a boolean padding mask from per-sample sequence lengths.

    Args:
        input_lens: iterable (or 1-D tensor) of sequence lengths.
        max_len: width of the mask; defaults to the module-level SEQ_LENGTH
            (resolved lazily at call time). Lengths greater than ``max_len``
            are clamped.

    Returns:
        Bool tensor of shape ``[len(input_lens), max_len]`` where True marks
        real (non-padding) token positions.
    """
    if max_len is None:
        max_len = SEQ_LENGTH
    lens = torch.as_tensor(input_lens).clamp(max=max_len)
    # Vectorized equivalent of the per-row loop: position < length.
    return torch.arange(max_len) < lens.unsqueeze(1)


class Seq2SeqAttentionEncoder(nn.Module):
    """Self-attention (transformer-style) encoder for sequence-to-sequence
    learning.

    (The original docstring described this as an RNN encoder; it is actually
    a stack of self-attention blocks over token + position embeddings.)
    """

    def __init__(self, vocab_size, head_num=2, embed_size=32, num_layers=2,
                 dropout=0.081024, **kwargs):
        super(Seq2SeqAttentionEncoder, self).__init__()
        # Token embedding.
        self.wte = nn.Embedding(vocab_size, embed_size)
        # Learned positional embedding (supports sequences up to 1024 tokens).
        self.wpe = nn.Embedding(1024, embedding_dim=embed_size)
        self.drop = nn.Dropout(p=dropout, inplace=False)
        self.h = nn.ModuleList(
            [SelfAttentionBlock(embedding_dim=embed_size, head_num=head_num, dropout=dropout) for _ in
             range(num_layers)])
        self.ln_f = nn.LayerNorm(embed_size, eps=1e-05, elementwise_affine=True)

    def forward(self, input_ids, input_lens, *args):
        """Encode a batch of token-id sequences.

        Args:
            input_ids: ``[batch_size, seq_length]`` integer token ids.
            input_lens: per-sample true lengths, used to mask padding.

        Returns:
            ``[batch_size, seq_length, embed_size]`` encoder output.
        """
        wte_output = self.wte(input_ids)  # [batch, seq, embed_size]
        b, t, c = wte_output.size()
        # Positional encodings for positions 0..t-1, broadcast over the batch.
        wpe_output = self.wpe(torch.arange(t, device=wte_output.device))[None, :, :].expand(b, t, c)

        # Combine embeddings and positional encodings, then apply dropout.
        hidden_states = self.drop(wte_output + wpe_output)

        # BUGFIX: the padding mask was computed but never passed to the
        # attention blocks, so padded positions participated in attention.
        # NOTE(review): get_mask_by_seq_lens always produces SEQ_LENGTH
        # columns — this assumes t == SEQ_LENGTH; confirm for variable-length
        # batches.
        mask = get_mask_by_seq_lens(input_lens).to(wte_output.device)
        for block in self.h:
            hidden_states = block(hidden_states, mask)

        # Final LayerNorm over the embedding dimension.
        return self.ln_f(hidden_states)


class Classifier(nn.Module):
    """Classification head: maps encoder output to class logits.

    A 1-D convolution collapses the embedding channels to a single channel,
    then a linear layer maps the sequence positions to class logits.

    Args:
        embed_dim: channel size of the encoder output (default 32).
        seq_len: sequence length of the encoder output (default 25).
        num_classes: number of output classes (default 5).
    """

    def __init__(self, embed_dim=32, seq_len=25, num_classes=5):
        super(Classifier, self).__init__()
        # NOTE(review): attribute kept as `conv2d` even though it is a Conv1d,
        # so existing state_dicts still load.
        self.conv2d = nn.Conv1d(embed_dim, 1, kernel_size=3, stride=1, padding=1)
        self.linear = nn.Linear(seq_len, num_classes)

    def forward(self, x):
        """x: [batch, seq_len, embed_dim] -> [batch, num_classes] logits."""
        x = x.transpose(1, 2)   # channel-first for the convolution
        x = self.conv2d(x)      # [batch, 1, seq_len]
        x = x.squeeze(1)        # [batch, seq_len]
        x = self.linear(x)      # [batch, num_classes]
        return x


class DoubanClassifierModel(nn.Module):
    """End-to-end Douban review classifier: attention encoder + linear head."""

    def __init__(self, vocab_size):
        super(DoubanClassifierModel, self).__init__()
        self.emb = Seq2SeqAttentionEncoder(vocab_size=vocab_size)
        self.classifier = Classifier()

    def forward(self, x, x_lens):
        # Embedding lookup requires integer token ids.
        token_ids = x.to(torch.long)
        encoded = self.emb(token_ids, x_lens)
        return self.classifier(encoded)


if __name__ == "__main__":
    # Smoke test: build the vocabulary, then push random token batches
    # through each model stage and print the resulting output shapes.
    chinese_tokenizer = ChineseCharacterTokenizer()
    chinese_tokenizer.build_vocab("../my_spider_douban_formal/output/clean_data.jsonl")
    encoder = Seq2SeqAttentionEncoder(len(chinese_tokenizer))
    inputs = torch.randint(0, 100, (123, 25))
    inputs_len = torch.randint(0, 25, (123,))
    encoded = encoder(inputs, inputs_len)
    print(encoded.shape)
    head = Classifier()
    logits = head(encoded)
    print(logits.shape)
    full_model = DoubanClassifierModel(len(chinese_tokenizer))
    print(full_model(inputs, inputs_len).shape)
