# GAN discriminator: Transformer encoder over 2-D path coordinate sequences
import torch
import torch.nn as nn
from torch.nn import TransformerEncoder, TransformerEncoderLayer
import math  # used for math.log in the positional-encoding frequency terms

class Discriminator(nn.Module):
    """Transformer-based GAN discriminator for 2-D path sequences.

    Each input sample is a sequence of (x, y) coordinates; the network
    scores how "real" the whole path looks.

    Args:
        input_dim: model (embedding) dimension of the Transformer.
        nhead: number of attention heads per encoder layer.
        num_layers: number of stacked Transformer encoder layers.
    """

    def __init__(self, input_dim=256, nhead=8, num_layers=6):
        super().__init__()
        # Project each 2-D coordinate into the model dimension
        # (matches the coordinate dimension produced by the generator).
        self.coord_embed = nn.Linear(2, input_dim)

        # Path order is essential, so inject sinusoidal position info.
        self.pos_encoder = PositionalEncoding(input_dim)

        # Stacked self-attention encoder over the embedded sequence.
        layer = TransformerEncoderLayer(
            d_model=input_dim,
            nhead=nhead,
            dim_feedforward=4 * input_dim,
            batch_first=True,
        )
        self.transformer = TransformerEncoder(layer, num_layers=num_layers)

        # Binary real/fake head producing a probability in (0, 1).
        self.classifier = nn.Sequential(
            nn.Linear(input_dim, 128),
            nn.LeakyReLU(0.2),
            nn.Linear(128, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Score a batch of coordinate sequences.

        Args:
            x: tensor of shape (batch_size, seq_len, 2).

        Returns:
            Tensor of shape (batch_size, 1) with real/fake probabilities.
        """
        # Embed coordinates, then add positional information.
        embedded = self.pos_encoder(self.coord_embed(x))

        # Contextualize the sequence with the Transformer encoder.
        encoded = self.transformer(embedded)

        # Use the final token's representation as the global path feature.
        summary = encoded[:, -1, :]
        return self.classifier(summary)

class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding (Vaswani et al. style).

    Precomputes a (1, max_len, d_model) table where even feature indices
    carry sin components and odd indices carry cos components, then adds
    the first seq_len rows to the input on each forward pass.

    Args:
        d_model: embedding dimension (must match the input's last dim).
        max_len: maximum supported sequence length.
    """

    def __init__(self, d_model, max_len=5000):
        super().__init__()
        # Frequency for each even feature index: 10000^(-2i / d_model).
        freqs = torch.exp(
            torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model)
        )
        # One row per position; broadcast against the frequency vector.
        positions = torch.arange(max_len).unsqueeze(-1)
        angles = positions * freqs

        table = torch.zeros(1, max_len, d_model)
        table[0, :, 0::2] = torch.sin(angles)
        table[0, :, 1::2] = torch.cos(angles)
        # Buffer: moves with the module's device/dtype but is not trained.
        self.register_buffer('pe', table)

    def forward(self, x):
        """Add positional encodings to x of shape (batch, seq_len, d_model)."""
        seq_len = x.size(1)
        return x + self.pe[:, :seq_len, :]