import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np
from models.sit import SiT_models
from diffusers.models import AutoencoderKL
from utils import load_encoders
import os

# Seed every RNG source so runs are reproducible
import random
def set_seed(seed):
    """Seed Python, NumPy and PyTorch (CPU + all CUDA devices) RNGs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    # Trade speed for determinism in cuDNN algorithm selection.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
set_seed(42)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# ------------------ Configuration ------------------
# Tag used only for the output image filename; enable exactly one.
# (The original code live-assigned `method` twice in a row, leaving a dead
# assignment of 'tanh'; only the last value, 'test', ever took effect.)
# method = 'dino'
# method = 'tanh'
method = 'test'
checkpoint_path = "exps/linear-dinov2-b-enc8-cifar/checkpoints/0040000.pt"  # path to the saved ckpt
# checkpoint_path = "exps/linear-dinov2-b-enc8-cifar/checkpoints/clean_0040000.pt"
# checkpoint_path = "exps/linear-dinov2-b-enc8-cifar/checkpoints/tanh-0040000.pt"
enc_type = "dinov2-vit-b"
model_type = "SiT-B/2"  # SiT variant name registered in SiT_models
num_classes = 1000
resolution = 256
latent_size = resolution // 8  # SD VAE downsamples spatial dims by 8x
batch_size = 128
n_samples = 10000  # number of samples to visualize

# ------------------ Load models ------------------
# NOTE(review): weights_only=False unpickles arbitrary objects — only load
# checkpoints from a trusted source.
checkpoint = torch.load(checkpoint_path, map_location="cpu", weights_only=False)


# Load the representation encoder(s) and the SD VAE.
encoders, encoder_types, architectures = load_encoders(enc_type, device, resolution)
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device)

# Per-channel affine applied to VAE latents (0.18215 is the standard SD scale).
latents_scale = torch.full((1, 4, 1, 1), 0.18215, device=device)
latents_bias = torch.zeros(1, 4, 1, 1, device=device)

block_kwargs = {"fused_attn": True, "qk_norm": False}
# Build the SiT backbone with one projection head per representation encoder.
z_dims = [encoder.embed_dim for encoder in encoders]
model = SiT_models[model_type](
    input_size=latent_size,
    num_classes=num_classes,
    use_cfg=False,
    z_dims=z_dims,
    encoder_depth=8,
    **block_kwargs,
).to(device)

# Strip the DistributedDataParallel 'module.' prefix from state-dict keys.
# The original used k.replace("module.", ""), which also mangles keys that
# merely *contain* "module." somewhere in the middle; strip the prefix only.
state_dict = checkpoint['model']
new_state_dict = {
    (k[len("module."):] if k.startswith("module.") else k): v
    for k, v in state_dict.items()
}

# strict=True so any missing/unexpected key fails loudly.
model.load_state_dict(new_state_dict, strict=True)

model.eval()

# ------------------ Data (CIFAR-10) ------------------
# Upsample 32x32 CIFAR images to the VAE's expected resolution.
transform = transforms.Compose([
    transforms.Resize((resolution, resolution)),
    transforms.ToTensor(),
])
cifar10 = datasets.CIFAR10(
    root='/home/zuwenqiang/Respo2/Testcode/data',
    train=True,
    transform=transform,
    download=False,
)
# shuffle=False keeps feature order deterministic across runs.
dataloader = DataLoader(cifar10, batch_size=batch_size, shuffle=False)

# ------------------ Feature extraction (SiT projection-head outputs) ------------------
features, labels = [], []
num_collected = 0
with torch.no_grad():
    for imgs, lbls in dataloader:
        bsz = imgs.size(0)
        if num_collected >= n_samples:
            break
        imgs = imgs.to(device)

        # Encode to scaled VAE latents; [0,1] images are mapped to [-1,1] first.
        latents = vae.encode(imgs * 2 - 1).latent_dist.sample()
        latents = latents * latents_scale + latents_bias

        # Dummy timestep / class inputs: we only care about the projection features.
        timesteps = torch.zeros(bsz, dtype=torch.float32, device=device)
        class_ids = torch.zeros(bsz, dtype=torch.long, device=device)

        _, zs_tilde = model(latents, timesteps, class_ids)

        # First projection head: [B, T, D]. L2-normalize per token, then
        # average over tokens to get one [B, D] feature per image.
        pooled = F.normalize(zs_tilde[0], dim=-1).mean(dim=1)

        features.append(pooled.cpu())
        labels.append(lbls.cpu())
        num_collected += bsz

# The final batch may overshoot n_samples; truncate after concatenation.
features = torch.cat(features, dim=0)[:n_samples].numpy()
labels = torch.cat(labels, dim=0)[:n_samples].numpy()



# ------------------ t-SNE ------------------
# Project the features to 2-D; fixed random_state keeps the embedding reproducible.
X_tsne = TSNE(
    n_components=2,
    init='pca',
    random_state=0,
    perplexity=30,
).fit_transform(features)

# ------------------ Visualization ------------------
plt.figure(figsize=(8, 8))
# plt.cm.get_cmap was deprecated in Matplotlib 3.7 and removed in 3.9;
# the tab10 colormap already has exactly 10 entries, so use it directly.
cmap = plt.cm.tab10
for cls in range(10):
    mask = labels == cls
    plt.scatter(X_tsne[mask, 0], X_tsne[mask, 1], label=str(cls), color=cmap(cls), s=10)
plt.legend()
plt.title("t-SNE of CIFAR10 Features")
plt.tight_layout()
plt.savefig(f'{method}.png')
