import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import torch.nn.functional as F
import torch
from Neural_Networks import P2D_GE_Net, CNNNet, ProtoNetClassifier, DN4Net, GNNImageClassifier
from train import aLL, Data_loading
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.preprocessing import StandardScaler
import numpy as np

def CNN_extract_features(model, dataloader, device):
    """Replay the CNN forward pass up to the second linear stage and collect features.

    Runs conv1..conv3 (each: relu -> pool -> batchnorm), flattens, then the two
    linear stages (relu -> batchnorm -> dropout), stopping before the final
    classifier head, so the returned vectors are the liner_2-stage embeddings.

    Args:
        model: trained CNN exposing conv1..3, pool, bn1..3, liner_1/2,
            bn_l1/2 and drop (dropout is a no-op in eval mode).
        dataloader: yields (inputs, targets) batches.
        device: torch device to run inference on.

    Returns:
        (features, labels): CPU tensors concatenated over all batches.
    """
    model.eval()
    feat_chunks = []
    label_chunks = []
    conv_stages = ((model.conv1, model.bn1),
                   (model.conv2, model.bn2),
                   (model.conv3, model.bn3))
    fc_stages = ((model.liner_1, model.bn_l1),
                 (model.liner_2, model.bn_l2))
    with torch.no_grad():
        for batch, targets in dataloader:
            out = batch.to(device)
            targets = targets.to(device)
            # Convolutional trunk: relu -> pool -> batchnorm per stage.
            for conv, bn in conv_stages:
                out = bn(model.pool(F.relu(conv(out))))
            out = out.view(out.size(0), -1)
            # Fully-connected stages: relu -> batchnorm -> dropout.
            for linear, bn in fc_stages:
                out = model.drop(bn(F.relu(linear(out))))
            feat_chunks.append(out.cpu())
            label_chunks.append(targets.cpu())
    return torch.cat(feat_chunks), torch.cat(label_chunks)


def Proto_extract_features(model, dataloader, device, num_samples=3000, perplexity=30):
    """Extract flattened embedding-network features for up to num_samples items.

    Only model.embedding (the convolutional backbone) is applied; the prototype
    head is skipped. Features are standardized (zero mean, unit variance per
    dimension) before being returned; unlike the sibling helpers, no t-SNE is
    run here.

    Args:
        model: ProtoNet-style model exposing an `embedding` module.
        dataloader: yields (images, labels) batches.
        device: torch device for inference.
        num_samples: cap on the number of samples collected.
        perplexity: unused here; kept only for signature compatibility with
            the other *_extract_features helpers that run t-SNE internally.

    Returns:
        features: standardized feature matrix [num_samples, D] (numpy array,
            as produced by StandardScaler).
        labels: torch.Tensor [num_samples].
    """
    model.eval()
    features = []
    labels = []

    with torch.no_grad():
        # Fix: the batch index from enumerate() was never used — iterate directly.
        for imgs, lbls in dataloader:
            imgs = imgs.to(device)
            feats = model.embedding(imgs)              # conv backbone only
            feats = feats.view(feats.size(0), -1)      # flatten to [B, D]

            features.append(feats.cpu())
            labels.append(lbls)

            # Stop collecting once enough samples have been accumulated.
            if sum(len(f) for f in features) >= num_samples:
                break

    features = torch.cat(features, dim=0)[:num_samples]
    labels = torch.cat(labels, dim=0)[:num_samples]

    # Standardize each feature dimension.
    features = StandardScaler().fit_transform(features)

    return features, labels

def GNN_extract_features(model, dataloader, device, num_samples=10000, perplexity=30):
    """Collect classifier-stage features from a GNN image classifier and embed them in 2-D via t-SNE.

    The feature for each sample is model.classifier applied on top of
    model.embedding, flattened per sample (NOTE: the classifier IS applied
    here — these are classifier-stage activations, not raw encoder features).
    Features are standardized before t-SNE.

    Args:
        model: model exposing `embedding` and `classifier` modules.
        dataloader: yields (images, labels) batches.
        device: torch device for inference.
        num_samples: cap on samples fed to t-SNE.
        perplexity: t-SNE perplexity.

    Returns:
        reduced: t-SNE coordinates, shape [num_samples, 2].
        labels: numpy array of the corresponding labels.
    """
    model.eval()
    feat_parts = []
    label_parts = []
    seen = 0

    with torch.no_grad():
        for batch_imgs, batch_lbls in dataloader:
            out = model.classifier(model.embedding(batch_imgs.to(device)))
            out = out.view(out.size(0), -1)  # flatten to [B, D]

            feat_parts.append(out.cpu())
            label_parts.append(batch_lbls)

            seen += out.size(0)
            if seen >= num_samples:
                break

    features = torch.cat(feat_parts, dim=0)[:num_samples]
    labels = torch.cat(label_parts, dim=0)[:num_samples]

    # Standardize, then project to two dimensions.
    features = StandardScaler().fit_transform(features)
    tsne = TSNE(n_components=2, perplexity=perplexity, init='pca', random_state=42)
    reduced = tsne.fit_transform(features)

    return reduced, labels.numpy()


def DN4_extract_features(model, dataloader, device, num_samples=5000, perplexity=30):
    """Extract encoder+classifier outputs from a DN4 model and embed them in 2-D via t-SNE.

    Each batch is pushed through model.encoder, flattened per sample, and then
    through model.classifier; the classifier outputs are standardized and
    reduced with t-SNE.

    Args:
        model: model exposing `encoder` and `classifier` modules.
        dataloader: yields (images, labels) batches.
        device: torch device for inference.
        num_samples: cap on samples fed to t-SNE.
        perplexity: t-SNE perplexity.

    Returns:
        reduced: t-SNE coordinates, shape [num_samples, 2].
        labels: numpy array of the corresponding labels.
    """
    model.eval()
    feat_parts = []
    label_parts = []
    seen = 0

    with torch.no_grad():
        for batch_imgs, batch_lbls in dataloader:
            encoded = model.encoder(batch_imgs.to(device))     # [B, C, H, W]
            flat = encoded.view(encoded.size(0), -1)           # [B, C*H*W]
            logits = model.classifier(flat)

            feat_parts.append(logits.cpu())
            label_parts.append(batch_lbls)

            seen += logits.size(0)
            if seen >= num_samples:
                break

    features = torch.cat(feat_parts, dim=0)[:num_samples]
    labels = torch.cat(label_parts, dim=0)[:num_samples]

    # Standardize, then project to two dimensions.
    features = StandardScaler().fit_transform(features)
    tsne = TSNE(n_components=2, perplexity=perplexity, init='pca', random_state=42)
    reduced = tsne.fit_transform(features)
    return reduced, labels.numpy()


def DSResNet_extract_features(model, GADF_dl, CWT_dl, device, max_samples=None):
    """Extract the pooled pre-FC features from a P2D_GE_Net dual-stream model.

    Runs both input streams (GADF and CWT) through their branch layers,
    fuses them by elementwise addition, applies the attention module and
    global pooling, and returns the flattened pooled vectors.

    Args:
        model: dual-stream model exposing gadf_*/cwt_* branch layers plus
            `attention` and `global_pool`.
        GADF_dl: DataLoader of GADF images (labels are taken from here).
        CWT_dl: DataLoader of CWT images, iterated in lockstep with GADF_dl —
            the two must be aligned sample-for-sample.
        device: torch device for inference.
        max_samples: optional cap on the number of samples extracted.

    Returns:
        features: Tensor [N, channels] on CPU.
        labels: Tensor [N] on CPU.
    """
    model.eval()
    feat_parts = []
    label_parts = []

    def _stream(x, conv1, bn1, conv2, bn2, pool1, res1, pool2):
        # Shared branch forward: two conv->bn->relu stages, then pool/res/pool.
        x = F.relu(bn1(conv1(x)))
        x = F.relu(bn2(conv2(x)))
        return pool2(res1(pool1(x)))

    with torch.no_grad():
        for (gadf_imgs, lbls), (cwt_imgs, _) in zip(GADF_dl, CWT_dl):
            gadf_feat = _stream(gadf_imgs.to(device),
                                model.gadf_conv1, model.gadf_bn1,
                                model.gadf_conv2, model.gadf_bn2,
                                model.gadf_pool1, model.gadf_res1,
                                model.gadf_pool2)
            cwt_feat = _stream(cwt_imgs.to(device),
                               model.cwt_conv1, model.cwt_bn1,
                               model.cwt_conv2, model.cwt_bn2,
                               model.cwt_pool1, model.cwt_res1,
                               model.cwt_pool2)

            # Additive fusion, then attention and global pooling to [B, C].
            pooled = model.global_pool(model.attention(gadf_feat + cwt_feat))
            pooled = pooled.view(pooled.size(0), -1)

            feat_parts.append(pooled.cpu())
            label_parts.append(lbls.to(device).cpu())

            if max_samples is not None and sum(f.shape[0] for f in feat_parts) >= max_samples:
                break

    # Concatenate all batches and truncate if a cap was requested.
    features = torch.cat(feat_parts, dim=0)
    labels = torch.cat(label_parts, dim=0)
    if max_samples is not None:
        features = features[:max_samples]
        labels = labels[:max_samples]

    return features, labels


def DSResNet_GADF_extract_features(model, GADF_train_dl, device, num_samples=5000, perplexity=30):
    """Extract GADF-branch features (pre-FC) and embed them in 2-D via t-SNE.

    Only the GADF branch of the dual-stream model is executed: two
    conv->bn->relu stages, pool1, both residual blocks, pool2, then a flatten.
    Features are standardized before t-SNE.

    Args:
        model: model exposing the gadf_* branch layers.
        GADF_train_dl: DataLoader of (GADF images, labels).
        device: torch device for inference.
        num_samples: cap on samples fed to t-SNE.
        perplexity: t-SNE perplexity.

    Returns:
        reduced: t-SNE coordinates, shape [num_samples, 2].
        labels: numpy array of the corresponding labels.
    """
    model.eval()
    feat_parts = []
    label_parts = []
    seen = 0

    with torch.no_grad():
        for batch_imgs, batch_lbls in GADF_train_dl:
            x = F.relu(model.gadf_bn1(model.gadf_conv1(batch_imgs.to(device))))
            x = F.relu(model.gadf_bn2(model.gadf_conv2(x)))
            x = model.gadf_pool2(model.gadf_res2(model.gadf_res1(model.gadf_pool1(x))))

            flat = x.view(x.size(0), -1)  # flatten pooled maps to [B, D]
            feat_parts.append(flat.cpu())
            label_parts.append(batch_lbls)

            seen += flat.size(0)
            if seen >= num_samples:
                break

    # Concatenate, truncate to the cap, standardize, then run t-SNE.
    features = torch.cat(feat_parts, dim=0)[:num_samples]
    labels = torch.cat(label_parts, dim=0)[:num_samples]

    features = StandardScaler().fit_transform(features)
    tsne = TSNE(n_components=2, perplexity=perplexity, init='pca', random_state=42)
    reduced = tsne.fit_transform(features)

    return reduced, labels.numpy()


def DSResNetCWT_extract_features(model, CWT_train_dl, device, num_samples=5000, perplexity=30):
    """Extract CWT-branch features (pre-FC) and embed them in 2-D via t-SNE.

    Mirror of DSResNet_GADF_extract_features for the CWT branch: two
    conv->bn->relu stages, pool1, both residual blocks, pool2, then a flatten.
    Features are standardized before t-SNE.

    Args:
        model: model exposing the cwt_* branch layers.
        CWT_train_dl: DataLoader of (CWT images, labels).
        device: torch device for inference.
        num_samples: cap on samples fed to t-SNE.
        perplexity: t-SNE perplexity.

    Returns:
        reduced: t-SNE coordinates, shape [num_samples, 2].
        labels: numpy array of the corresponding labels.
    """
    model.eval()
    feat_parts = []
    label_parts = []
    seen = 0

    with torch.no_grad():
        for batch_imgs, batch_lbls in CWT_train_dl:
            x = F.relu(model.cwt_bn1(model.cwt_conv1(batch_imgs.to(device))))
            x = F.relu(model.cwt_bn2(model.cwt_conv2(x)))
            x = model.cwt_pool2(model.cwt_res2(model.cwt_res1(model.cwt_pool1(x))))

            flat = x.view(x.size(0), -1)  # flatten pooled maps to [B, D]
            feat_parts.append(flat.cpu())
            label_parts.append(batch_lbls)

            seen += flat.size(0)
            if seen >= num_samples:
                break

    # Concatenate, truncate to the cap, standardize, then run t-SNE.
    features = torch.cat(feat_parts, dim=0)[:num_samples]
    labels = torch.cat(label_parts, dim=0)[:num_samples]

    features = StandardScaler().fit_transform(features)
    tsne = TSNE(n_components=2, perplexity=perplexity, init='pca', random_state=42)
    reduced = tsne.fit_transform(features)

    return reduced, labels.numpy()


def P2D_DSResNet_1_extract_features(model, GADF_train_dl, CWT_train_dl, device, num_samples=5000, perplexity=30):
    """Extract fused dual-stream features from P2D_DSResNet_1 and reduce to 2-D with t-SNE.

    Both branches are run through their conv/residual stages, fused (add or
    channel-concat depending on model.fusion), projected by fuse_proj, passed
    through attention and global pooling, standardized, and embedded with t-SNE.

    Args:
        model: P2D_DSResNet_1 with loaded weights.
        GADF_train_dl: DataLoader of GADF images (labels are taken from here).
        CWT_train_dl: DataLoader of CWT images, iterated in lockstep with
            GADF_train_dl — the two must be aligned sample-for-sample.
        device: torch device for inference.
        num_samples: cap on samples fed to t-SNE.
        perplexity: t-SNE perplexity.

    Returns:
        reduced: t-SNE coordinates, shape [num_samples, 2].
        labels: numpy array of the corresponding labels.
    """
    model.eval()
    feat_parts = []
    label_parts = []
    seen = 0

    def _stream(x, conv1, bn1, conv2, bn2, pool1, res1, res2, pool2):
        # Shared branch forward: two conv->bn->relu stages, pool, both
        # residual blocks, pool.
        x = F.relu(bn1(conv1(x)))
        x = F.relu(bn2(conv2(x)))
        return pool2(res2(res1(pool1(x))))

    with torch.no_grad():
        for (gadf_batch, batch_lbls), (cwt_batch, _) in zip(GADF_train_dl, CWT_train_dl):
            gadf_out = _stream(gadf_batch.to(device),
                               model.gadf_conv1, model.gadf_bn1,
                               model.gadf_conv2, model.gadf_bn2,
                               model.gadf_pool1, model.gadf_res1,
                               model.gadf_res2, model.gadf_pool2)
            cwt_out = _stream(cwt_batch.to(device),
                              model.cwt_conv1, model.cwt_bn1,
                              model.cwt_conv2, model.cwt_bn2,
                              model.cwt_pool1, model.cwt_res1,
                              model.cwt_res2, model.cwt_pool2)

            # Fuse the two streams (elementwise add or channel concat),
            # then project with the model's fusion head.
            if model.fusion == 'add':
                fused = gadf_out + cwt_out
            else:
                fused = torch.cat([gadf_out, cwt_out], dim=1)
            fused = model.fuse_proj(fused)

            # Attention + global pooling -> one vector per sample.
            pooled = model.global_pool(model.attention(fused))
            feat_parts.append(pooled.view(pooled.size(0), -1).cpu())
            label_parts.append(batch_lbls)

            seen += feat_parts[-1].shape[0]
            if seen >= num_samples:
                break

    # Concatenate, truncate, standardize, then run t-SNE.
    features = torch.cat(feat_parts, dim=0)[:num_samples]
    labels = torch.cat(label_parts, dim=0)[:num_samples]

    features = StandardScaler().fit_transform(features)
    tsne = TSNE(n_components=2, perplexity=perplexity, init='pca', random_state=42)
    reduced = tsne.fit_transform(features)

    return reduced, labels.numpy()

def P2D_DSResNet_V2_extract_features(model, GADF_train_dl, CWT_train_dl, device, num_samples=5000, perplexity=30):
    """Extract attention-fused features from P2D_DSResNet_V2 and reduce to 2-D with t-SNE.

    Uses the model's own `_branch_forward` for each stream, fuses the streams
    with the model's attention-fusion module, applies global average pooling,
    standardizes the pooled vectors, and embeds them with t-SNE.

    Args:
        model: P2D_DSResNet_V2 with loaded weights, exposing `_branch_forward`,
            `fusion`, and `gap`.
        GADF_train_dl: DataLoader of GADF images (labels are taken from here).
        CWT_train_dl: DataLoader of CWT images, iterated in lockstep with
            GADF_train_dl — the two must be aligned sample-for-sample.
        device: torch device for inference.
        num_samples: cap on samples fed to t-SNE.
        perplexity: t-SNE perplexity.

    Returns:
        reduced: t-SNE coordinates, shape [num_samples, 2].
        labels: numpy array of the corresponding labels.
    """
    model.eval()
    feat_parts = []
    label_parts = []
    seen = 0

    with torch.no_grad():
        for (gadf_batch, batch_lbls), (cwt_batch, _) in zip(GADF_train_dl, CWT_train_dl):
            # Per-branch features via the model's own branch-forward helper.
            gadf_out = model._branch_forward(
                gadf_batch.to(device), model.gadf_conv1, model.gadf_bn1,
                model.gadf_conv2, model.gadf_bn2,
                model.gadf_pool1, model.gadf_res1, model.gadf_res2, model.gadf_pool2
            )
            cwt_out = model._branch_forward(
                cwt_batch.to(device), model.cwt_conv1, model.cwt_bn1,
                model.cwt_conv2, model.cwt_bn2,
                model.cwt_pool1, model.cwt_res1, model.cwt_res2, model.cwt_pool2
            )

            # Attention fusion; the second return value (weights) is discarded.
            fused, _ = model.fusion(gadf_out, cwt_out)

            # Global average pooling -> [B, C].
            pooled = model.gap(fused).view(fused.size(0), -1)
            feat_parts.append(pooled.cpu())
            label_parts.append(batch_lbls)

            seen += pooled.shape[0]
            if seen >= num_samples:
                break

    # Concatenate, truncate, standardize, then run t-SNE.
    features = torch.cat(feat_parts, dim=0)[:num_samples]
    labels = torch.cat(label_parts, dim=0)[:num_samples]

    features = StandardScaler().fit_transform(features)
    tsne = TSNE(n_components=2, perplexity=perplexity, init='pca', random_state=42)
    reduced = tsne.fit_transform(features)

    return reduced, labels.numpy()

