import tensorflow as tf
from tensorflow.keras import layers, Model

# SimSiam model structure: encoder (backbone) + projection MLP + prediction MLP
def build_backbone(input_shape=(640, 640, 3)):
    """Create the MobileNetV2 feature extractor (encoder trunk).

    Global average pooling collapses the spatial feature map, so the
    model outputs one 1280-d feature vector per image (MobileNetV2's
    default width).  Weights are randomly initialized because the
    representation is learned via self-supervision, not transferred
    from ImageNet.
    """
    return tf.keras.applications.MobileNetV2(
        include_top=False,        # drop the ImageNet classification head
        weights=None,             # no pretrained weights (self-supervised setting)
        input_shape=input_shape,
        pooling='avg',            # global average pool -> 1280-d vector
    )

# Projection MLP: maps the encoder output (e.g. 1280-d) to a
# `projection_dim`-d latent representation z — the space where the
# similarity loss is computed.
def build_projection_mlp(input_dim, projection_dim=256, hidden_dim=2048):
    """Build the projection head g(.).

    Args:
        input_dim: Dimensionality of the encoder output (1280 for the
            default MobileNetV2 backbone).  Previously this argument was
            silently ignored; it now declares the input so the model's
            weights are built eagerly and mismatches fail fast.
        projection_dim: Size of the latent representation z.
        hidden_dim: Width of the hidden layer.

    Returns:
        A Sequential model mapping (batch, input_dim) -> (batch, projection_dim).

    NOTE(review): the SimSiam paper's projection MLP applies BatchNorm
    after each Dense layer (including the output) — omitted here.
    Confirm this is intentional; BN is reported to matter for avoiding
    representation collapse.
    """
    return tf.keras.Sequential([
        layers.InputLayer(input_shape=(input_dim,)),  # use input_dim instead of ignoring it
        layers.Dense(hidden_dim, activation='relu'),
        layers.Dense(projection_dim),
    ], name='projection_mlp')

# Prediction MLP: predicts the other view's projection (p) from this
# view's projection (z).
def build_prediction_mlp(input_dim, hidden_dim=512, output_dim=256):
    """Build the prediction head h(.).

    Args:
        input_dim: Dimensionality of the projection z.  Previously this
            argument was silently ignored; it now declares the input so
            the model's weights are built eagerly and mismatches fail fast.
        hidden_dim: Width of the bottleneck hidden layer.
        output_dim: Output dimensionality; must equal the projection
            dimension so p can be compared against z in the loss.

    Returns:
        A Sequential model mapping (batch, input_dim) -> (batch, output_dim).
    """
    return tf.keras.Sequential([
        layers.InputLayer(input_shape=(input_dim,)),  # use input_dim instead of ignoring it
        layers.Dense(hidden_dim, activation='relu'),
        layers.Dense(output_dim),
    ], name='prediction_mlp')

class SimSiam(Model):
    """SimSiam: shared encoder + projection MLP + prediction MLP.

    Both augmented views pass through the same encoder/projector; the
    predictor then tries to predict the other view's projection.  The
    forward pass returns (p1, z2, p2, z1) for the symmetric negative
    cosine similarity loss with stop-gradient on the z targets.
    """

    def __init__(self, input_shape=(640, 640, 3), projection_dim=256):
        """Args:
            input_shape: Input image shape fed to the backbone.
            projection_dim: Dimensionality of the latent space z (and
                of the predictor output p, which must match it).
        """
        super().__init__()
        self.encoder = build_backbone(input_shape)
        # Derive the feature width from the encoder instead of
        # hard-coding 1280, so a different backbone/width still works.
        feature_dim = self.encoder.output_shape[-1]
        self.projector = build_projection_mlp(
            input_dim=feature_dim, projection_dim=projection_dim)
        # Pass output_dim explicitly: previously it stayed fixed at 256,
        # which broke whenever projection_dim != 256 (p could not be
        # compared against z).  Default (256) is unchanged.
        self.predictor = build_prediction_mlp(
            input_dim=projection_dim, output_dim=projection_dim)

    def call(self, x1, x2, training=False):
        """Forward both augmented views.

        Args:
            x1, x2: Two differently augmented batches of the same images.
            training: Learning-phase flag forwarded to all sub-models
                (affects BatchNorm in the backbone).  Kept as False by
                default to preserve existing caller behavior — training
                loops must pass training=True explicitly.

        Returns:
            Tuple (p1, z2, p2, z1) of predictor outputs and projections.
        """
        z1 = self.projector(self.encoder(x1, training=training), training=training)
        z2 = self.projector(self.encoder(x2, training=training), training=training)

        p1 = self.predictor(z1, training=training)
        p2 = self.predictor(z2, training=training)

        # NOTE(review): per the SimSiam paper the loss must treat the z
        # targets as constants, e.g.
        #   loss = D(p1, tf.stop_gradient(z2)) / 2 + D(p2, tf.stop_gradient(z1)) / 2
        # stop_gradient is NOT applied here — confirm the training loop
        # does it, otherwise the representation can collapse.
        return p1, z2, p2, z1
