import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

# 1. Data loading and preprocessing
def load_data(file_path):
    """Read the CSV at *file_path* and split it into arrays.

    Expected layout: an 'Index' column, 'lon' and 'lat' columns, and the
    time-series values starting at the 4th column.

    Returns:
        (indices, lon_lat, time_series) as numpy arrays.
    """
    frame = pd.read_csv(file_path)
    coords = frame[['lon', 'lat']].to_numpy()
    series = frame.iloc[:, 3:].to_numpy()  # columns 4..end hold the time series
    return frame['Index'].to_numpy(), coords, series

def construct_graph(lon_lat, sigma=1.0, k=5):
    """Build a Gaussian-weighted k-NN adjacency matrix from coordinates.

    Each point keeps edges only to its k nearest neighbours (which include
    the point itself, since the self-distance is zero); those edges are
    weighted by the Gaussian kernel exp(-d^2 / (2 * sigma^2)).

    Bug fixed: the original computed the Gaussian-kernel weights and then
    overwrote them with an unweighted 0/1 matrix, so ``sigma`` had no
    effect. The kNN mask now selects which Gaussian weights survive.

    Args:
        lon_lat: (N, 2) array of lon/lat coordinates.
        sigma: Gaussian kernel bandwidth.
        k: number of nearest neighbours retained per point.

    Returns:
        (N, N) float adjacency matrix (not necessarily symmetric).
    """
    # Pairwise Euclidean distances via broadcasting (equivalent to
    # sklearn's euclidean_distances, without the extra dependency).
    diffs = lon_lat[:, None, :] - lon_lat[None, :, :]
    dist_matrix = np.sqrt((diffs ** 2).sum(axis=-1))
    weights = np.exp(-dist_matrix**2 / (2 * sigma**2))  # Gaussian kernel
    adj_matrix = np.zeros_like(dist_matrix)
    knn = np.argsort(dist_matrix, axis=1)[:, :k]
    for i, neighbors in enumerate(knn):
        adj_matrix[i, neighbors] = weights[i, neighbors]
    return adj_matrix

# 2. Graph Transformer model
class GraphTransformer(nn.Module):
    """Transformer encoder over a set of nodes.

    Each node's time series is projected to ``hidden_dim``, passed through
    ``num_layers`` TransformerEncoderLayers in which every node attends to
    every other node, then projected to ``cluster_dim``.

    Bug fixed: the original reshaped the input to (1, N, hidden). With the
    default ``batch_first=False`` layout that means sequence length 1 and
    batch size N, so each node attended only to itself and the attention
    layers never mixed information across nodes. Nodes are now laid out
    along the sequence axis.
    """

    def __init__(self, input_dim, hidden_dim, num_heads, num_layers, cluster_dim):
        super(GraphTransformer, self).__init__()
        self.input_projection = nn.Linear(input_dim, hidden_dim)
        self.transformer_layers = nn.ModuleList([
            nn.TransformerEncoderLayer(d_model=hidden_dim, nhead=num_heads,
                                       dim_feedforward=hidden_dim * 4, dropout=0.1)
            for _ in range(num_layers)
        ])
        self.output_projection = nn.Linear(hidden_dim, cluster_dim)

    def forward(self, x, adj_matrix=None):
        """Map node features (num_nodes, input_dim) -> (num_nodes, cluster_dim).

        ``adj_matrix`` is accepted for interface compatibility but is not
        yet used to mask attention.
        """
        h = self.input_projection(x)
        # (N, hidden) -> (N, 1, hidden): nodes along the sequence axis,
        # batch of 1, so self-attention operates across nodes.
        h = h.unsqueeze(1)
        for layer in self.transformer_layers:
            h = layer(h)
        h = h.squeeze(1)  # back to (num_nodes, hidden_dim)
        return self.output_projection(h)

# 3. Self-supervised (contrastive) loss
class ContrastiveLoss(nn.Module):
    """InfoNCE-style contrastive loss.

    Rows of ``z1`` and ``z2`` at the same index form positive pairs; all
    other rows in the batch serve as negatives. Similarities are cosine
    similarities scaled by ``1 / temperature``.
    """

    def __init__(self, temperature=0.5):
        super(ContrastiveLoss, self).__init__()
        self.temperature = temperature

    def forward(self, z1, z2):
        # L2-normalise so the matrix product below yields cosine similarities.
        a = nn.functional.normalize(z1, p=2, dim=1)
        b = nn.functional.normalize(z2, p=2, dim=1)
        similarity = torch.mm(a, b.T) / self.temperature
        # Positive pair of row i is row i of the other view.
        targets = torch.arange(a.size(0), device=a.device)
        return nn.functional.cross_entropy(similarity, targets)

# 4. Training loop
def train(dataset, model, optimizer, epochs=50, device=None):
    """Run self-supervised training and return the final embeddings.

    Args:
        dataset: iterable of ``(x, adj)`` pairs; ``adj`` is unused by the
            current model and may be None.
        model: module mapping ``x`` to an embedding tensor.
        optimizer: optimizer over ``model``'s parameters.
        epochs: number of passes over ``dataset``.
        device: target device for inputs. Defaults to the model's own
            device (the original read a module-level ``device`` global,
            which broke the function outside this script).

    Returns:
        The last batch's embeddings as a detached numpy array.

    Raises:
        ValueError: if ``dataset`` is empty (the original raised a
            confusing NameError instead).

    NOTE(review): the loss compares ``features`` with itself, so every row
    is trivially its own positive pair; presumably two augmented views were
    intended. Kept as-is to preserve the script's behavior.
    """
    if not dataset:
        raise ValueError("dataset must contain at least one (x, adj) pair")
    if device is None:
        device = next(model.parameters()).device
    criterion = ContrastiveLoss()  # hoisted: no need to rebuild every step
    model.train()
    for epoch in range(epochs):
        for x, adj in dataset:
            x = x.to(device)
            features = model(x)
            loss = criterion(features, features)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print(f"Epoch {epoch + 1}, Loss: {loss.item()}")
    return features.cpu().detach().numpy()

# 5. Visualization and saving
def visualize_and_save(features, lon_lat, output_path="visualization.png"):
    """Project *features* to 2-D with PCA and save a scatter plot.

    Points are colored by longitude (the first column of ``lon_lat``).

    Fix: the figure is now closed after saving, so repeated calls no
    longer accumulate open matplotlib figures (a memory leak in loops).

    Args:
        features: (N, D) array of embeddings.
        lon_lat: (N, 2) array; column 0 (longitude) drives the colormap.
        output_path: destination PNG path.
    """
    pca = PCA(n_components=2)
    reduced_features = pca.fit_transform(features)
    fig = plt.figure(figsize=(10, 8))
    scatter = plt.scatter(reduced_features[:, 0], reduced_features[:, 1],
                          c=lon_lat[:, 0], cmap='viridis', s=30)
    plt.colorbar(scatter, label='Longitude')
    plt.xlabel('PCA Component 1')
    plt.ylabel('PCA Component 2')
    plt.title('Feature Visualization')
    plt.savefig(output_path, dpi=300)
    plt.close(fig)  # release the figure; savefig alone does not free it
    print(f"Visualization saved to {output_path}")

# Main pipeline
file_path = "meida_csv.csv"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load point indices, lon/lat coordinates and per-point time series.
indices, lon_lat, time_series = load_data(file_path)
adj_matrix = construct_graph(lon_lat)

x_tensor = torch.tensor(time_series, dtype=torch.float32).to(device)

# Initialize the model and optimizer
model = GraphTransformer(input_dim=time_series.shape[1], hidden_dim=256, num_heads=4, num_layers=6, cluster_dim=10).to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Build the dataset (simplified: a single tensor batch)
dataset = [(x_tensor, None)]  # the adjacency matrix is not used by the current model

# Train the model
features = train(dataset, model, optimizer)

# Save the visualization result
visualize_and_save(features, lon_lat, output_path="feature_visualization.png")
