import torch
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from scipy.spatial.distance import cosine
from scipy.stats import pearsonr, wasserstein_distance, entropy


def measure(cls_token, last_layer_embedding, logit_scale,
            save_path=r"D:\Project\multimoding\project\CLIP\classify\res\PCA.png"):
    """Compare a CLS token against the model's last-layer embedding.

    Prints mean/variance statistics of the raw and logit-scaled features,
    the Wasserstein distance between the two flattened value distributions,
    and cosine / Pearson similarity; saves a side-by-side PCA scatter plot
    of the raw and scaled features.

    Args:
        cls_token: 2-D tensor (batch, dim); L2-normalized along dim=1
            before scaling.
        last_layer_embedding: tensor whose leading dimension is the batch;
            L2-normalized along dim=1 before scaling.
            NOTE(review): for a 3-D (B, T, D) tensor, norm over dim=1
            normalizes across tokens, not channels — confirm intended.
        logit_scale: scalar multiplier applied after L2 normalization
            (CLIP-style temperature).
        save_path: where the PCA figure is written. Defaults to the
            original hard-coded location for backward compatibility.

    Returns:
        None. Results are printed and the figure is written to save_path.
    """
    # L2-normalize along dim=1, then apply the logit scale.
    scaled_cls_token = logit_scale * cls_token / cls_token.norm(dim=1, keepdim=True)
    scaled_embedding = logit_scale * last_layer_embedding / last_layer_embedding.norm(dim=1, keepdim=True)

    # Basic statistics before/after scaling.
    print("Mean and Variance of CLS Token:", torch.mean(cls_token), torch.var(cls_token))
    print("Mean and Variance of Scaled CLS Token:", torch.mean(scaled_cls_token), torch.var(scaled_cls_token))
    print("Mean and Variance of Embedding:", torch.mean(last_layer_embedding), torch.var(last_layer_embedding))
    print("Mean and Variance of Scaled Embedding:", torch.mean(scaled_embedding), torch.var(scaled_embedding))

    # Flatten everything after the batch dimension.
    # (The batch size was previously hard-coded to 4, which crashed for
    # any other input size; derive it from the tensor instead.)
    batch = last_layer_embedding.shape[0]
    embedding_2d = last_layer_embedding.reshape(batch, -1)
    scaled_embedding_2d = scaled_embedding.reshape(batch, -1)

    def _np(t):
        # Detach and move to CPU once so sklearn/scipy receive plain
        # numpy arrays even when inputs carry gradients or live on GPU.
        return t.detach().cpu().numpy()

    # PCA visualization; only the first two components are plotted.
    pca = PCA(n_components=3)
    pca_cls = pca.fit_transform(_np(cls_token))
    pca_scaled_cls = pca.fit_transform(_np(scaled_cls_token))
    pca_embedding = pca.fit_transform(_np(embedding_2d))
    pca_scaled_embedding = pca.fit_transform(_np(scaled_embedding_2d))

    plt.figure(figsize=(12, 6))
    plt.subplot(1, 2, 1)
    plt.scatter(pca_cls[:, 0], pca_cls[:, 1], label='CLS Token')
    plt.scatter(pca_embedding[:, 0], pca_embedding[:, 1], label='Embedding')
    plt.legend()
    plt.title('PCA - CLS Token and Embedding')

    plt.subplot(1, 2, 2)
    plt.scatter(pca_scaled_cls[:, 0], pca_scaled_cls[:, 1], label='Scaled CLS Token')
    plt.scatter(pca_scaled_embedding[:, 0], pca_scaled_embedding[:, 1], label='Scaled Embedding')
    plt.legend()
    plt.title('PCA - Scaled CLS Token and Scaled Embedding')

    plt.savefig(save_path)
    plt.close()  # free the figure so repeated calls do not accumulate

    # Distribution distance between the two flattened value sets.
    # Flatten once and reuse for every metric below.
    cls_flat = _np(cls_token).flatten()
    emb_flat = _np(last_layer_embedding).flatten()
    wasserstein_dist = wasserstein_distance(cls_flat, emb_flat)
    print("Wasserstein Distance:", wasserstein_dist)

    # Element-wise similarity of the flattened features.
    # NOTE: only meaningful when both tensors hold the same number of
    # elements; scipy raises otherwise, matching the original behavior.
    cos_sim = 1 - cosine(cls_flat, emb_flat)
    pearson_corr, _ = pearsonr(cls_flat, emb_flat)

    print("Cosine Similarity:", cos_sim)
    print("Pearson Correlation:", pearson_corr)
