#coding:utf-8

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, GATConv, ChebConv, SAGEConv
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import os

# 检查是否有GPU可用
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# 图神经网络模型类
# Graph-neural-network regressor: a stack of graph convolution layers
# followed by a linear head that outputs one scalar prediction per node.
class GNNPredictor(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers=2, model_type='gcn'):
        """
        Args:
            input_dim: number of input features per node.
            hidden_dim: width of the hidden conv layers.
            output_dim: width of the final conv layer (input to the linear head).
            num_layers: total conv layers; an input and an output layer are
                always created, so the effective minimum is 2.
            model_type: one of 'gcn', 'gat', 'cheb', 'sage'.

        Raises:
            ValueError: if model_type is not one of the supported values
                (previously this silently built an empty layer stack and
                failed later with an obscure index error in forward()).
        """
        super(GNNPredictor, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.num_layers = num_layers
        self.model_type = model_type

        # Graph convolution layers
        self.conv_layers = nn.ModuleList()

        if model_type == 'gcn':
            # GCN model
            self.conv_layers.append(GCNConv(input_dim, hidden_dim))
            for _ in range(num_layers - 2):
                self.conv_layers.append(GCNConv(hidden_dim, hidden_dim))
            self.conv_layers.append(GCNConv(hidden_dim, output_dim))
        elif model_type == 'gat':
            # GAT model; 4 attention heads are concatenated in hidden layers,
            # so subsequent layers take hidden_dim * 4 inputs.
            self.conv_layers.append(GATConv(input_dim, hidden_dim, heads=4, concat=True))
            for _ in range(num_layers - 2):
                self.conv_layers.append(GATConv(hidden_dim * 4, hidden_dim, heads=4, concat=True))
            self.conv_layers.append(GATConv(hidden_dim * 4, output_dim, heads=1, concat=False))
        elif model_type == 'cheb':
            # ChebyNet model
            self.conv_layers.append(ChebConv(input_dim, hidden_dim, K=2))
            for _ in range(num_layers - 2):
                self.conv_layers.append(ChebConv(hidden_dim, hidden_dim, K=2))
            self.conv_layers.append(ChebConv(hidden_dim, output_dim, K=2))
        elif model_type == 'sage':
            # GraphSAGE model
            self.conv_layers.append(SAGEConv(input_dim, hidden_dim))
            for _ in range(num_layers - 2):
                self.conv_layers.append(SAGEConv(hidden_dim, hidden_dim))
            self.conv_layers.append(SAGEConv(hidden_dim, output_dim))
        else:
            raise ValueError(f"Unsupported model_type: {model_type!r}; "
                             "expected 'gcn', 'gat', 'cheb' or 'sage'")

        # Fully-connected head for the final scalar prediction
        self.fc = nn.Linear(output_dim, 1)

        # Dropout to reduce overfitting
        self.dropout = nn.Dropout(0.5)

    def forward(self, x, edge_index, edge_weight=None):
        """Run node features through the conv stack and the linear head.

        Returns a tensor of shape (num_nodes, 1).
        """
        # Only GCNConv and ChebConv accept an edge_weight argument.
        # GATConv's third positional parameter is edge_attr (which raises
        # without edge_dim) and SAGEConv takes no edge weight at all, so
        # passing edge_weight to them would crash — drop it for those models.
        use_weight = edge_weight is not None and self.model_type in ('gcn', 'cheb')

        for conv in self.conv_layers[:-1]:
            if use_weight:
                x = conv(x, edge_index, edge_weight)
            else:
                x = conv(x, edge_index)
            x = F.relu(x)
            x = self.dropout(x)

        # Final conv layer: no activation or dropout
        last = self.conv_layers[-1]
        if use_weight:
            x = last(x, edge_index, edge_weight)
        else:
            x = last(x, edge_index)

        # Linear head produces the prediction
        x = self.fc(x)
        return x

# 构建时间序列的图结构
# Build a graph over a time series: one node per time step.
def build_time_series_graph(data, window_size=24):
    """Connect each time step to every neighbor within `window_size` steps.

    Edges are directed both ways and weighted by temporal proximity:
    w = 1 / (|i - j| + 1), so nearer time steps get larger weights.

    Args:
        data: sequence whose length defines the number of nodes; the
            feature values themselves are not used here.
        window_size: maximum temporal distance for an edge.

    Returns:
        (edge_index, edge_weight): a LongTensor of shape (2, E) and a
        FloatTensor of shape (E,). E == 0 when there are fewer than
        two nodes.
    """
    num_nodes = len(data)

    edge_index = []
    edge_weight = []

    # Link node i to every node within window_size steps on either side.
    for i in range(num_nodes):
        for j in range(max(0, i - window_size), min(num_nodes, i + window_size + 1)):
            if i != j:
                edge_index.append([i, j])
                edge_weight.append(1.0 / (abs(i - j) + 1))

    if not edge_index:
        # With 0 or 1 nodes there are no edges; torch.tensor([]).t() would
        # produce a float tensor of the wrong shape/dtype, so return
        # correctly-shaped empty tensors instead.
        return (torch.empty((2, 0), dtype=torch.long),
                torch.empty(0, dtype=torch.float))

    edge_index = torch.tensor(edge_index, dtype=torch.long).t().contiguous()
    edge_weight = torch.tensor(edge_weight, dtype=torch.float)

    return edge_index, edge_weight

# 第一个函数：GNN算法模型生成并保存
# Function 1: train a GNN model and save it to disk.
def train_and_save_gnn_model(dataframe, model_path='./model/gnn_model.pth', 
                             hidden_dim=64, output_dim=32, num_layers=2, 
                             model_type='gcn', lr=0.001, epochs=100, 
                             window_size=24, scaler_path='./model/scaler.pkl'):
    """Train a GNN on tabular time-series data and save model + scaler.

    Args:
        dataframe: pandas DataFrame; the last column is the target and the
            remaining columns are features. Rows are treated as consecutive
            time steps when building the graph.
        model_path: file path for the saved model checkpoint.
        hidden_dim: hidden layer width.
        output_dim: width of the last conv layer.
        num_layers: number of conv layers.
        model_type: 'gcn', 'gat', 'cheb' or 'sage'.
        lr: Adam learning rate.
        epochs: number of full-graph training iterations.
        window_size: temporal neighborhood size for graph construction.
        scaler_path: file path for the fitted StandardScaler.

    Returns:
        dict with 'final_loss', 'min_loss', 'model_path', 'scaler_path'.
    """
    import joblib

    # Ensure the save directories exist. Guard against bare filenames:
    # os.path.dirname('model.pth') is '' and os.makedirs('') raises.
    for path in (model_path, scaler_path):
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)

    # Split features (all columns but the last) from the target (last column).
    X = dataframe.iloc[:, :-1].values
    y = dataframe.iloc[:, -1].values.reshape(-1, 1)

    # Standardize features and persist the scaler so prediction can reuse it.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    joblib.dump(scaler, scaler_path)

    # Convert to PyTorch tensors on the selected device.
    X_tensor = torch.tensor(X_scaled, dtype=torch.float).to(device)
    y_tensor = torch.tensor(y, dtype=torch.float).to(device)

    # Each row becomes a graph node connected to its temporal neighbors.
    edge_index, edge_weight = build_time_series_graph(X_scaled, window_size)
    edge_index = edge_index.to(device)
    edge_weight = edge_weight.to(device)

    # Create the model.
    input_dim = X.shape[1]
    model = GNNPredictor(input_dim, hidden_dim, output_dim, num_layers, model_type).to(device)

    # Loss and optimizer.
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # Full-batch training over the whole graph.
    model.train()
    losses = []

    for epoch in range(epochs):
        optimizer.zero_grad()

        output = model(X_tensor, edge_index, edge_weight)
        loss = criterion(output, y_tensor)

        loss.backward()
        optimizer.step()

        losses.append(loss.item())

        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')

    # Save the weights together with the hyperparameters needed to rebuild
    # the model at prediction time.
    torch.save({
        'model_state_dict': model.state_dict(),
        'input_dim': input_dim,
        'hidden_dim': hidden_dim,
        'output_dim': output_dim,
        'num_layers': num_layers,
        'model_type': model_type
    }, model_path)

    print(f"模型已保存至: {model_path}")
    print(f"标准化器已保存至: {scaler_path}")

    return {
        'final_loss': losses[-1],
        'min_loss': min(losses),
        'model_path': model_path,
        'scaler_path': scaler_path
    }

# 第二个函数：调用GNN模型进行数据预测
# Function 2: load a trained GNN model and run prediction.
def predict_with_gnn_model(x_data, model_path='./model/gnn_model.pth', 
                          scaler_path='./model/scaler.pkl', window_size=24):
    """Predict with a previously trained GNN model.

    Args:
        x_data: pandas DataFrame or numpy array of feature rows, treated
            as consecutive time steps.
        model_path: path to the checkpoint saved by train_and_save_gnn_model.
        scaler_path: path to the StandardScaler saved during training.
        window_size: temporal neighborhood size; should match training.

    Returns:
        1-D numpy array with one prediction per input row.
    """
    import joblib

    # Load the checkpoint onto the current device. map_location is required
    # so a GPU-trained checkpoint can be loaded on a CPU-only machine.
    checkpoint = torch.load(model_path, map_location=device)
    input_dim = checkpoint['input_dim']
    hidden_dim = checkpoint['hidden_dim']
    output_dim = checkpoint['output_dim']
    num_layers = checkpoint['num_layers']
    model_type = checkpoint['model_type']

    # Rebuild the model with the saved hyperparameters and restore weights.
    model = GNNPredictor(input_dim, hidden_dim, output_dim, num_layers, model_type).to(device)
    model.load_state_dict(checkpoint['model_state_dict'])

    # Load the scaler fitted during training.
    scaler = joblib.load(scaler_path)

    # Accept either a DataFrame or an array-like.
    if isinstance(x_data, pd.DataFrame):
        X = x_data.values
    else:
        X = np.array(x_data)

    # Apply the training-time standardization.
    X_scaled = scaler.transform(X)

    # Convert to a PyTorch tensor on the selected device.
    X_tensor = torch.tensor(X_scaled, dtype=torch.float).to(device)

    # Build the temporal graph over the input rows.
    edge_index, edge_weight = build_time_series_graph(X_scaled, window_size)
    edge_index = edge_index.to(device)
    edge_weight = edge_weight.to(device)

    # Inference: eval mode disables dropout; no_grad skips autograd.
    model.eval()
    with torch.no_grad():
        predictions = model(X_tensor, edge_index, edge_weight)

    # Flatten (num_nodes, 1) to a 1-D numpy array.
    predictions_np = predictions.cpu().numpy().flatten()

    return predictions_np

# Example
# Using the GAT model with custom parameters:
# result = train_and_save_gnn_model(
#     df, 
#     model_path='./model/gat_model_epochs200_lr0005.pth',
#     hidden_dim=128,         # larger hidden dimension
#     output_dim=64,          # larger output dimension
#     num_layers=3,           # 3-layer network
#     model_type='gat',       # use the GAT model
#     lr=0.0005,              # learning rate
#     epochs=200,             # train for 200 epochs
#     window_size=48,         # larger time window
#     scaler_path='./model/gat_scaler.pkl'
# )