import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GraphConv, GATConv
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
import numpy as np
from sklearn.cluster import KMeans
from sklearn.exceptions import ConvergenceWarning
import warnings

# Suppress KMeans convergence warnings
warnings.filterwarnings("ignore", category=ConvergenceWarning)


# 1. Data preprocessing - dynamically adjusts the number of clusters per image
class MNISTGraphDataset:
    """Wraps an MNIST-style dataset and converts each image into a DGL graph.

    Each image is segmented by running KMeans on its pixel intensities;
    every resulting cluster becomes one graph node, and the graph is fully
    connected (including self-loops).  Node features are per-cluster
    statistics; edge features are geometric/intensity relations between
    cluster centroids.

    NOTE(review): the 28x28 image size is hard-coded (the reshape and the
    /28 coordinate normalization below) — confirm inputs are MNIST-sized.
    """

    def __init__(self, dataset, max_clusters: int = 15, min_clusters: int = 3):
        # dataset: indexable, yielding (image_tensor, label) pairs
        #   (e.g. torchvision MNIST with ToTensor transform).
        # max_clusters / min_clusters: bounds on the per-image KMeans k.
        self.dataset = dataset
        self.max_clusters = max_clusters
        self.min_clusters = min_clusters

    def __len__(self) -> int:
        return len(self.dataset)

    def __getitem__(self, idx):
        """Return ``(graph, label)`` for the image at ``idx``.

        The graph has ``k`` nodes (one per intensity cluster) with:
        - ``ndata['feat']``: [mean intensity, cx/28, cy/28, intensity var]
        - ``ndata['label']``: the image label repeated on every node
        - ``edata['feat']``: [centroid distance, |mean-intensity diff|,
          sin(angle), cos(angle)] for every ordered node pair.
        """
        img, label = self.dataset[idx]
        img_np = img.squeeze().numpy()

        # Dynamically pick the number of clusters from the fraction of
        # non-zero pixels, clamped to [min_clusters, max_clusters].
        nonzero_ratio = np.count_nonzero(img_np) / img_np.size
        n_clusters = max(self.min_clusters,
                         min(self.max_clusters, int(self.max_clusters * nonzero_ratio)))

        # Add tiny noise so no two pixel values are exactly equal
        # (avoids degenerate / duplicate KMeans centers).
        # NOTE(review): img_np is a view of the tensor from .numpy(), so the
        # in-place += mutates that tensor; safe here only if the dataset's
        # transform produces a fresh tensor per access — confirm.
        img_np += np.random.uniform(0, 1e-3, size=img_np.shape)

        # Cluster on raw intensity values (one feature per pixel).
        kmeans = KMeans(n_clusters=n_clusters, random_state=0, n_init=10)
        clusters = kmeans.fit_predict(img_np.reshape(-1, 1))
        clusters = clusters.reshape(28, 28)

        # Centroid (row, col) of each cluster's pixel positions; the order
        # of unique_clusters defines the node index of each cluster.
        unique_clusters = np.unique(clusters)
        centroids = np.array([np.mean(np.argwhere(clusters == i), axis=0)
                              for i in unique_clusters])

        # Node features: mean intensity + normalized coordinates + variance.
        node_features = []
        for i, cluster_id in enumerate(unique_clusters):
            mask = (clusters == cluster_id)
            region_values = img_np[mask]
            cx, cy = centroids[i]
            node_features.append([
                region_values.mean(),
                cx / 28,
                cy / 28,
                region_values.var()
            ])

        # Build a fully-connected graph with self-loops:
        # src/dst enumerate every ordered (u, v) node pair.
        num_nodes = len(node_features)
        src = np.repeat(range(num_nodes), num_nodes)
        dst = np.tile(range(num_nodes), num_nodes)

        g = dgl.graph((src, dst))
        g.ndata['feat'] = torch.FloatTensor(node_features)
        # Graph-level label duplicated onto every node (unused by training,
        # which classifies whole graphs).
        g.ndata['label'] = torch.LongTensor([label] * num_nodes)

        # Edge features: centroid distance + intensity difference + angle
        # (encoded as sin/cos so it is continuous across the ±pi wrap).
        src_pos = centroids[src]
        dst_pos = centroids[dst]
        spatial_dist = np.linalg.norm(src_pos - dst_pos, axis=1)
        intensity_diff = np.abs(np.array(node_features)[src, 0] - np.array(node_features)[dst, 0])
        angle = np.arctan2(dst_pos[:, 1] - src_pos[:, 1], dst_pos[:, 0] - src_pos[:, 0])

        g.edata['feat'] = torch.FloatTensor(np.column_stack([
            spatial_dist,
            intensity_diff,
            np.sin(angle),
            np.cos(angle)
        ]))

        return g, label

    # 2. Enhanced GNN model


class MNISTGNN(nn.Module):
    """Graph classifier for the graphs built by ``MNISTGraphDataset``.

    Pipeline: GraphConv -> 3-head GATConv -> attention-weighted sum
    pooling over each graph's nodes -> linear classifier.
    """

    def __init__(self, in_dim=4, hidden_dim=64, num_classes=10):
        super().__init__()
        self.conv1 = GraphConv(in_dim, hidden_dim)
        # 3 attention heads; head outputs are concatenated in forward(),
        # hence the hidden_dim * 3 input sizes below.
        self.conv2 = GATConv(hidden_dim, hidden_dim, num_heads=3)
        self.edge_encoder = nn.Linear(4, hidden_dim)
        # Fix: forward() referenced self.attn_pool, but it was never
        # defined, raising AttributeError on the first forward pass.
        # It scores each node for the weighted pooling step.
        self.attn_pool = nn.Linear(hidden_dim * 3, 1)
        self.fc = nn.Linear(hidden_dim * 3, num_classes)

    def forward(self, g, features):
        """Return per-graph class logits of shape (batch_size, num_classes).

        g: a (possibly batched) DGLGraph with ``edata['feat']`` of size 4.
        features: node feature tensor of shape (num_nodes, in_dim).
        """
        # Encode edge features and stash them on the graph.
        # NOTE(review): DGL's GATConv.forward accepts no `edge_feat`
        # argument (the original call passed one, raising TypeError), so
        # these encoded edge features are currently not consumed by the
        # convolutions — they are only made available on the graph.
        g.edata['he'] = self.edge_encoder(g.edata['feat'])

        h = F.relu(self.conv1(g, features))
        h = self.conv2(g, h)                    # (N, num_heads, hidden_dim)
        h = h.view(-1, h.size(1) * h.size(2))   # concat heads -> (N, 3*hidden_dim)
        g.ndata['h'] = h

        # Attention-weighted sum pooling: sigmoid-gated per-node score,
        # then per-graph weighted sum of node embeddings.
        with g.local_scope():
            g.ndata['a'] = torch.sigmoid(self.attn_pool(h))
            hg = dgl.sum_nodes(g, 'h', 'a')
            return self.fc(hg)

def train():
    """Train MNISTGNN on graph-encoded MNIST for 10 epochs.

    Downloads MNIST under ./data if needed, trains with AdamW + cosine
    LR schedule, and prints per-epoch train loss and test accuracy.
    """
    # Data loading
    train_data = MNIST(root='./data', train=True, download=True, transform=ToTensor())
    test_data = MNIST(root='./data', train=False, transform=ToTensor())

    train_graphs = MNISTGraphDataset(train_data)
    test_graphs = MNISTGraphDataset(test_data)

    def collate_fn(batch):
        # batch: list of (graph, label) pairs yielded by MNISTGraphDataset
        graphs, labels = zip(*batch)
        batched_graph = dgl.batch(graphs)
        return batched_graph, torch.tensor(labels)

    # Fix: the original wrapped each dataset in list(zip(dataset, targets)),
    # which (a) eagerly built a graph (running KMeans) for every image
    # before training started, and (b) produced ((graph, label), target)
    # items whose first element is a tuple, so dgl.batch() in collate_fn
    # would fail. The dataset already yields (graph, label) lazily, so it
    # can be handed to the DataLoader directly.
    train_loader = DataLoader(train_graphs, batch_size=32, shuffle=True,
                              collate_fn=collate_fn)
    test_loader = DataLoader(test_graphs, batch_size=32, collate_fn=collate_fn)

    # Model and optimizer
    model = MNISTGNN()
    optimizer = torch.optim.AdamW(model.parameters(), lr=0.001, weight_decay=1e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
    criterion = nn.CrossEntropyLoss()

    # Training loop
    for epoch in range(10):
        model.train()
        total_loss = 0
        for batched_g, labels in train_loader:
            optimizer.zero_grad()
            logits = model(batched_g, batched_g.ndata['feat'])
            loss = criterion(logits, labels)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        # Step the cosine schedule once per epoch (T_max=10 matches the
        # 10-epoch run, so LR anneals to its minimum at the end).
        scheduler.step()

        # Validation
        model.eval()
        correct = 0
        with torch.no_grad():
            for g, labels in test_loader:
                logits = model(g, g.ndata['feat'])
                correct += (logits.argmax(1) == labels).sum().item()

        print(f"Epoch {epoch:02d} | Loss: {total_loss / len(train_loader):.4f} | "
              f"Acc: {correct / len(test_data):.4f}")


if __name__ == "__main__":
    train()