import torch
from torch import nn
from torch.nn import functional as F

class VectorQuantizer(nn.Module):
    """Vector-quantization layer (VQ-VAE, van den Oord et al., 2017).

    Maps each input vector to its nearest codebook embedding and returns the
    straight-through quantized vectors together with the VQ training loss.
    """

    def __init__(self, n_states, z_dim):
        super(VectorQuantizer, self).__init__()

        # Codebook: n_states embedding vectors of dimension z_dim,
        # initialized uniformly in [-1/n_states, 1/n_states].
        self.embedding = nn.Embedding(n_states, z_dim)
        self.embedding.weight.data.uniform_(-1/n_states, 1/n_states)

        # Beta in the VQ-VAE paper: weight of the commitment term.
        self.commitment_cost = 0.25
        self.n_states = n_states

    def forward(self, inputs):
        """Quantize `inputs` (batch, z_dim) to the nearest codebook vectors.

        Returns:
            loss: scalar VQ loss (codebook loss + commitment_cost * commitment loss).
            quantized: (batch, z_dim) quantized vectors with straight-through gradients.
            perplexity: scalar, exp(entropy) of the codebook usage distribution.
            encodings: (batch, n_states) one-hot code assignments.
        """
        # Squared L2 distance to every code: |x|^2 + |e|^2 - 2 x.e  -> (batch, n_states).
        distances = (torch.sum(inputs**2, dim=1, keepdim=True) +
                     torch.sum(self.embedding.weight**2, dim=1) -
                     2 * torch.matmul(inputs, self.embedding.weight.t()))
        encoding_indices = torch.argmin(distances, dim=1).unsqueeze(1)

        # One-hot assignment matrix (batch, n_states).
        encodings = torch.zeros(encoding_indices.shape[0], self.n_states).to(inputs.device)
        encodings.scatter_(1, encoding_indices, 1)

        quantized = torch.matmul(encodings, self.embedding.weight)

        # BUG FIX: the detach() placements were swapped. The codebook loss
        # (full weight) must propagate gradients into the embeddings, and the
        # commitment loss (scaled by commitment_cost) into the encoder output.
        # The original scaled the codebook gradient by 0.25 and the commitment
        # gradient by 1 — the opposite of the VQ-VAE formulation.
        q_latent_loss = F.mse_loss(quantized, inputs.detach())   # codebook loss
        e_latent_loss = F.mse_loss(quantized.detach(), inputs)   # commitment loss
        loss = q_latent_loss + self.commitment_cost * e_latent_loss

        # Straight-through estimator: gradients bypass the non-differentiable argmin.
        quantized = inputs + (quantized - inputs).detach()
        avg_probs = torch.mean(encodings, dim=0)
        perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))

        return loss, quantized, perplexity, encodings
    


class VQVAE(nn.Module):
    """VQ-VAE: encoder -> vector quantizer -> decoder.

    The encoder and decoder modules are supplied by the caller; the
    quantizer is built from (n_states, z_dim).
    """

    def __init__(self, encoder, decoder, z_dim, n_states):
        super(VQVAE, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.vq = VectorQuantizer(n_states, z_dim)

    def forward(self, spatial_x, seq_x):
        """Encode both inputs, quantize the latent, and reconstruct.

        Returns (vq_loss, reconstruction, perplexity, one-hot encodings).
        """
        z = self.encoder(spatial_x, seq_x)
        loss, quantized, perplexity, encodings = self.vq(z)
        # BUG FIX: the decoder was fed the raw latent `z` (the correct call was
        # commented out), bypassing the quantizer entirely while its loss was
        # still optimized. Decode the quantized latent; the straight-through
        # estimator inside the quantizer keeps gradients flowing to the encoder.
        hat_x = self.decoder(quantized)
        return loss, hat_x, perplexity, encodings
    

class ResNetBlock(nn.Module):
    """1-D residual block: two 3x3 conv + batch-norm layers with a skip connection."""

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResNetBlock, self).__init__()

        # Main path: conv -> BN -> ReLU -> conv -> BN.
        self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm1d(out_channels)
        self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm1d(out_channels)

        # Skip path: identity when shapes match, otherwise a 1x1 projection
        # (with BN) to align channel count and stride.
        if stride == 1 and in_channels == out_channels:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride),
                nn.BatchNorm1d(out_channels),
            )

    def forward(self, x):
        """Apply the residual block to x of shape (batch, in_channels, length)."""
        main = F.relu(self.bn1(self.conv1(x)))
        main = self.bn2(self.conv2(main))
        # Residual connection, then final activation.
        return F.relu(main + self.shortcut(x))
    

class Encoder(nn.Module):
    """Encoder fusing a 1-D conv/ResNet branch over `spatial_x` with an
    embedded feature from `seq_x`, projected down to a z_dim latent vector.
    """

    def __init__(self, input_dim, hidden_dim, z_dim):
        super(Encoder, self).__init__()

        # Stem: conv -> BN -> ReLU over the channel-first spatial input.
        self.conv1 = nn.Conv1d(in_channels=input_dim, out_channels=hidden_dim, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm1d(hidden_dim)

        # Stack of residual blocks applied to the spatial branch.
        self.encoder_blocks = nn.ModuleList([
            ResNetBlock(hidden_dim, hidden_dim) for _ in range(12)
        ])

        # BUG FIX: the original also created block1..block6 (six more
        # ResNetBlocks) that were never used in forward() — dead parameters
        # that bloated the model and optimizer state. Removed.

        # Sequence branch: embedding lookup followed by a linear projection.
        # NOTE(review): vocabulary size 1024 is hard-coded — seq_x values must
        # be in [0, 1023]; confirm against the data pipeline.
        self.embedding = nn.Embedding(1024, hidden_dim)
        self.seq_fc = nn.Linear(hidden_dim, hidden_dim)

        # Fusion head: MLP over the concatenated branch features.
        self.fc1 = nn.Linear(hidden_dim * 2, hidden_dim)
        self.bn3 = nn.BatchNorm1d(hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim // 2)
        self.bn4 = nn.BatchNorm1d(hidden_dim // 2)
        self.fc3 = nn.Linear(hidden_dim // 2, z_dim)

    def forward(self, spatial_x, seq_x):
        """Encode (spatial_x, seq_x) into a (batch, z_dim) latent.

        Assumes spatial_x is (batch, length, input_dim) — permuted here to
        channel-first for Conv1d — and seq_x holds integer indices with a
        singleton dim squeezed after embedding (TODO confirm with caller).
        """
        x = spatial_x.permute(0, 2, 1)
        x = F.relu(self.bn1(self.conv1(x)))

        for block in self.encoder_blocks:
            x = block(x)

        # Global average pooling over the temporal axis -> (batch, hidden_dim).
        x = x.mean(dim=-1)

        seq_x = self.embedding(seq_x.long()).squeeze(1)
        seq_x = F.relu(self.seq_fc(seq_x))

        combined = torch.cat([x, seq_x], dim=1)
        combined = F.relu(self.bn3(self.fc1(combined)))
        combined = F.relu(self.bn4(self.fc2(combined)))
        combined = self.fc3(combined)

        return combined
    

class Decoder(nn.Module):
    """MLP decoder mapping a (batch, z_dim) latent to a
    (batch, step_num, input_dim) reconstruction.
    """

    def __init__(self, input_dim, hidden_dim, z_dim):
        super(Decoder, self).__init__()
        self.step_num = 500      # number of output timesteps per sample
        self.layer_num = 1
        self.hidden_dim = hidden_dim
        self.input_dim = input_dim

        # Expansion MLP: z_dim -> hidden_dim//2 -> hidden_dim.
        self.fc1 = nn.Linear(z_dim, hidden_dim // 2)
        self.bn1 = nn.BatchNorm1d(hidden_dim // 2)
        self.fc2 = nn.Linear(hidden_dim // 2, hidden_dim)
        self.bn2 = nn.BatchNorm1d(hidden_dim)

        # BUG FIX: removed an unused nn.ConvTranspose1d (kernel_size=step_num)
        # that was never called in forward() yet added
        # hidden_dim^2 * step_num dead parameters.

        # Output projection: one flat vector reshaped to (step_num, input_dim).
        self.fc3 = nn.Linear(hidden_dim, input_dim * self.step_num)

    def forward(self, x):
        """Decode latent x (batch, z_dim) -> (batch, step_num, input_dim)."""
        x = F.relu(self.bn1(self.fc1(x)))
        x = F.relu(self.bn2(self.fc2(x)))
        # BUG FIX: the original applied F.relu a second time to the
        # already-ReLU'd activation — a no-op, removed for clarity.
        x = self.fc3(x).view(-1, self.step_num, self.input_dim)
        return x




