import yfinance as yf
import datetime as dt
import pandas as pd
from sklearn.preprocessing import StandardScaler
import numpy as np
import torch
from torch_geometric.data import Data
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GATConv
from torch_geometric.loader import DataLoader
import random
from tqdm import tqdm
import plotly.graph_objs as go
from plotly.offline import iplot

# Data loading: daily price history for six large-cap tech stocks.
start_date = dt.datetime(2013, 1, 1)
end_date = dt.datetime(2024, 3, 7)

google = yf.download("GOOGL", start_date, end_date)
apple = yf.download("AAPL", start_date, end_date)
microsoft = yf.download("MSFT", start_date, end_date)
amazon = yf.download("AMZN", start_date, end_date)
meta = yf.download("META", start_date, end_date)
nvidia = yf.download("NVDA", start_date, end_date)

# One column per company, holding that ticker's daily opening price.
# NOTE(review): recent yfinance versions return MultiIndex (price, ticker)
# columns by default, in which case frame['Open'] is a one-column DataFrame
# rather than a Series — verify against the installed yfinance version.
data = pd.DataFrame({
    'google': google['Open'],
    'microsoft': microsoft['Open'],
    'amazon': amazon['Open'],
    'nvidia': nvidia['Open'],
    'meta': meta['Open'],
    'apple': apple['Open']
})

# Standardize each column to zero mean / unit variance. The scaler is fit on
# the FULL series, so test-period statistics leak into the training data.
scaler = StandardScaler()
data_scaled = pd.DataFrame(scaler.fit_transform(data), columns=data.columns)

# Adjacency matrix
def AdjacencyMatrix(L):
    """Return an L x L all-ones adjacency matrix: every node is connected
    to every other node, self-loops included (a fully connected graph)."""
    return np.ones((L, L))

class StockMarketDataset:
    """Turn the globally-scaled price table (``data_scaled``) into a list of
    graph samples: each sample covers one sliding window of ``N_hist`` input
    days plus ``N_pred`` target days, with one graph node per stock.

    Parameters
    ----------
    W : np.ndarray
        (n_node, n_node) weighted adjacency matrix; non-zero entries become
        graph edges.
    N_hist : int
        Number of historical days per sample (model input length).
    N_pred : int
        Number of future days per sample (prediction horizon).
    """

    def __init__(self, W, N_hist, N_pred):
        self.W = W
        self.N_hist = N_hist
        self.N_pred = N_pred

    def DatasetCreate(self):
        """Build and return the full list of windowed graph samples.

        Reads the module-level ``data_scaled`` DataFrame and caches the node
        count on ``self.n_node`` as a side effect.
        """
        num_days, self.n_node = data_scaled.shape
        n_window = self.N_hist + self.N_pred
        edge_index, edge_attr = self._create_edges(self.n_node)
        sequences = self._create_sequences(data_scaled, self.n_node, n_window, edge_index, edge_attr)
        return sequences

    def _create_edges(self, n_node):
        """Convert the dense adjacency matrix ``self.W`` into COO-format
        ``edge_index`` (2, E) and ``edge_attr`` (E, 1) tensors, keeping only
        non-zero weights."""
        edge_index = torch.zeros((2, n_node ** 2), dtype=torch.long)
        edge_attr = torch.zeros((n_node ** 2, 1))
        num_edges = 0
        for i in range(n_node):
            for j in range(n_node):
                if self.W[i, j] != 0:
                    edge_index[:, num_edges] = torch.tensor([i, j], dtype=torch.long)
                    edge_attr[num_edges, 0] = self.W[i, j]
                    num_edges += 1
        # Trim the preallocated buffers down to the edges actually present.
        edge_index = edge_index[:, :num_edges]
        edge_attr = edge_attr[:num_edges]
        return edge_index, edge_attr

    def _create_sequences(self, data, n_node, n_window, edge_index, edge_attr):
        """Slide a length-``n_window`` window over the day axis and emit one
        ``Data`` graph per position: ``x`` holds the (n_node, N_hist) history
        and ``y`` the (n_node, N_pred) targets."""
        sequences = []
        # BUG FIX: the original indexed the DataFrame as ``data[sta:end, :]``,
        # which raises for pandas objects (tuple keys are not valid DataFrame
        # indexers) — convert to a plain ndarray once, up front.
        values = np.asarray(data)
        num_days = values.shape[0]
        for i in range(num_days - n_window + 1):
            sta = i
            end = i + n_window
            # (window_days, n_node) -> (n_node, window_days)
            full_window = np.swapaxes(values[sta:end, :], 0, 1)
            g = Data(x=torch.FloatTensor(full_window[:, :self.N_hist]),
                     y=torch.FloatTensor(full_window[:, self.N_hist:]),
                     edge_index=edge_index,
                     # BUG FIX: edge_attr was computed but never attached.
                     edge_attr=edge_attr,
                     num_nodes=n_node)
            sequences.append(g)
        return sequences

# Dataset split
def train_val_test_splits(sequences, splits, holdout=100):
    """Randomly partition ``sequences`` into train/val/test lists.

    The trailing ``holdout`` sequences (the most recent windows) are excluded
    from all three splits, preserving the original hard-coded behavior.

    Parameters
    ----------
    sequences : list
        Windowed graph samples, oldest first.
    splits : tuple of float
        (train, val, test) fractions; expected to sum to 1.
    holdout : int, optional
        Number of trailing sequences to exclude entirely (default 100, the
        constant the original version hard-coded).

    Returns
    -------
    tuple of list
        (train, val, test), mutually disjoint samples from the pool.
    """
    split_train, split_val, split_test = splits

    # BUG FIX: the original computed split indices from len(sequences) while
    # drawing from a pool that was ``holdout`` items smaller, so the val/test
    # fractions were wrong and the test split could even come out empty.
    # Base the cut points on the actual pool size instead.
    indices = list(range(max(len(sequences) - holdout, 0)))
    random.shuffle(indices)
    pool = len(indices)
    idx_train = int(pool * split_train)
    idx_val = int(pool * (split_train + split_val))
    train = [sequences[index] for index in indices[:idx_train]]
    val = [sequences[index] for index in indices[idx_train:idx_val]]
    test = [sequences[index] for index in indices[idx_val:]]
    return train, val, test

# Hyperparameters for windowing and batching
n_nodes = 6
n_hist = 50
n_pred = 10
batch_size = 32

# Adjacency matrix: fully connected graph over the six stocks.
W = AdjacencyMatrix(n_nodes)

# Convert the price table into graph time-series samples.
dataset = StockMarketDataset(W, n_hist, n_pred)
sequences = dataset.DatasetCreate()

# Random train/val/test split.
# NOTE(review): randomly splitting overlapping windows of one series places
# near-duplicate samples in both train and test — confirm this is intended.
splits = (0.9, 0.05, 0.05)
train, val, test = train_val_test_splits(sequences, splits)

# drop_last=True keeps every batch at exactly batch_size graphs.
train_dataloader = DataLoader(train, batch_size=batch_size, shuffle=True, drop_last=True)
val_dataloader = DataLoader(val, batch_size=batch_size, shuffle=True, drop_last=True)
test_dataloader = DataLoader(test, batch_size=batch_size, shuffle=True, drop_last=True)

# Model definition
class ST_GNN_Model(torch.nn.Module):
    """Spatio-temporal GNN: a GAT layer mixes information across stock nodes,
    a two-layer GRU encoder plus a GRU decoder model the time dimension, and
    a linear head emits ``n_pred`` future steps for every node.

    Parameters
    ----------
    in_channels : int
        History length per node (``n_hist``); GAT input and output width.
    out_channels : int
        Prediction horizon per node (``n_pred``).
    n_nodes : int
        Number of graph nodes per sample.
    gru_hs_l1, gru_hs_l2 : int
        Hidden sizes of the first / second encoder GRU layers.
    heads : int, optional
        Number of GAT attention heads (averaged, since ``concat=False``).
    dropout : float, optional
        Dropout probability used inside the GAT layer and after it.
    """

    def __init__(self, in_channels, out_channels, n_nodes, gru_hs_l1, gru_hs_l2, heads=1, dropout=0.01):
        super(ST_GNN_Model, self).__init__()
        self.n_pred = out_channels
        self.heads = heads
        self.dropout = dropout
        self.n_nodes = n_nodes
        self.gru_hidden_size_l1 = gru_hs_l1
        self.gru_hidden_size_l2 = gru_hs_l2
        self.decoder_hidden_size = self.gru_hidden_size_l2

        # Spatial attention layer and the two encoder GRUs (seq-first input).
        self.gat = GATConv(in_channels=in_channels, out_channels=in_channels, heads=heads, dropout=dropout, concat=False)
        self.encoder_gru_l1 = torch.nn.GRU(input_size=self.n_nodes, hidden_size=self.gru_hidden_size_l1, num_layers=1, bias=True)
        self.encoder_gru_l2 = torch.nn.GRU(input_size=self.gru_hidden_size_l1, hidden_size=self.gru_hidden_size_l2, num_layers=1, bias=True)

        # Decoder. FIX: ``dropout`` on a single-layer GRU is ignored by PyTorch
        # (it only applies between stacked layers) and merely raises a warning,
        # so the kwarg is dropped — behavior is unchanged.
        self.GRU_decoder = torch.nn.GRU(input_size=self.gru_hidden_size_l2, hidden_size=self.decoder_hidden_size, num_layers=1, bias=True)
        self.prediction_layer = torch.nn.Linear(self.decoder_hidden_size, self.n_nodes * self.n_pred, bias=True)

    def forward(self, data, device):
        """Predict the next ``n_pred`` steps for every node of every graph in
        the batch.

        Parameters
        ----------
        data : torch_geometric Batch/Data
            Batched graphs with ``x`` of shape (batch * n_nodes, n_hist).
        device : str
            Retained for backward compatibility; no longer used.

        Returns
        -------
        torch.Tensor of shape (batch_size * n_nodes, n_pred).
        """
        x, edge_index = data.x, data.edge_index
        # FIX: ``x`` is already a tensor on the right device; the old
        # torch.FloatTensor / torch.cuda.FloatTensor branch is deprecated and
        # copies needlessly. Only ensure float32 dtype.
        x = x.float()
        x = self.gat(x, edge_index)
        x = F.dropout(x, self.dropout, training=self.training)
        batch_size = data.num_graphs
        n_node = int(data.num_nodes / batch_size)
        # (batch * nodes, hist) -> (hist, batch, nodes) for the seq-first GRUs.
        x = torch.reshape(x, (batch_size, n_node, data.num_features))
        x = torch.movedim(x, 2, 0)
        encoderl1_outputs, _ = self.encoder_gru_l1(x)
        x = F.relu(encoderl1_outputs)
        encoderl2_outputs, h2 = self.encoder_gru_l2(x)
        x = F.relu(encoderl2_outputs)
        x, _ = self.GRU_decoder(x, h2)
        # FIX: index the last time step explicitly instead of squeeze(), which
        # silently collapsed the batch dimension when batch_size == 1.
        x = x[-1, :, :]
        x = self.prediction_layer(x)
        # FIX: one reshape suffices (the original reshaped twice in a row).
        x = torch.reshape(x, (batch_size * self.n_nodes, self.n_pred))
        return x

# Model training
# Hyperparameters
gru_hs_l1 = 16
gru_hs_l2 = 16
learning_rate = 1e-3
Epochs = 50
device = 'cuda' if torch.cuda.is_available() else 'cpu'

model = ST_GNN_Model(in_channels=n_hist, out_channels=n_pred, n_nodes=n_nodes, gru_hs_l1=gru_hs_l1, gru_hs_l2=gru_hs_l2)
pretrained = False
model_path = "ST_GNN_Model.pth"

# Optionally resume from a previously saved checkpoint.
if pretrained:
    model.load_state_dict(torch.load(model_path))

optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-7)
criterion = torch.nn.MSELoss()
model.to(device)

# Standard supervised loop: MSE between predicted and true future windows.
for epoch in range(Epochs):
    model.train()
    for _, batch in enumerate(tqdm(train_dataloader, desc=f"Epoch {epoch}")):
        batch = batch.to(device)
        optimizer.zero_grad()
        y_pred = torch.squeeze(model(batch, device))
        loss = criterion(y_pred.float(), torch.squeeze(batch.y).float())
        loss.backward()
        optimizer.step()
    # NOTE: this prints only the LAST batch's loss for the epoch, not an
    # average over all batches.
    print(f"Loss: {loss:.7f}")

# Model evaluation and visualization
@torch.no_grad()
def Extract_results(model, device, dataloader, type=''):
    model.eval()
    model.to(device)
    y_pred