import time
import torch_geometric.utils
from torch_geometric.data import HeteroData
import networkx as nx
from torch_geometric.loader import LinkNeighborLoader
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.nn import HGTConv, Linear
from origin_data import MyData

from load_data import alarms_df

# Build the heterogeneous graph from the raw alarms dataframe, then add the
# reverse edge types so that message passing can flow in both directions
# (RandomLinkSplit below relies on those rev_* edge types existing).
hetero_graph = MyData(alarms_df).data
data = T.ToUndirected()(hetero_graph)

# Edge types that carry supervision labels for link prediction, and the
# reverse edge types (added by ToUndirected) that must be split consistently
# but receive no labels of their own.
_SUPERVISED_EDGE_TYPES = [
    ("alarm", "on", "host"),
    ("alarm", "to", "bussiness_tree"),
    ("host", "belongsto", "bussiness_tree"),
]
_REVERSE_EDGE_TYPES = [
    ("host", "rev_on", "alarm"),
    ("bussiness_tree", "rev_to", "alarm"),
    ("bussiness_tree", "rev_belongsto", "host"),
]

# 80/10/10 edge-level split. 30% of the training edges are held out purely
# for supervision (disjoint from message passing). Training negatives are
# NOT materialized here (add_negative_train_samples=False) — the loader
# samples them on the fly; val/test get 2 negatives per positive.
transform = T.RandomLinkSplit(
    num_val=0.1,
    num_test=0.1,
    disjoint_train_ratio=0.3,
    neg_sampling_ratio=2.0,
    add_negative_train_samples=False,
    edge_types=_SUPERVISED_EDGE_TYPES,
    rev_edge_types=_REVERSE_EDGE_TYPES,
)

def _split_labels(split, edge_type):
    """Return (edge_label_index, edge_label) for one edge type of a split."""
    store = split[edge_type]
    return store.edge_label_index, store.edge_label


# Apply the link-level split once; each returned object is a full HeteroData
# with per-edge-type supervision labels attached.
train_data, val_data, test_data = transform(data)
print(train_data, "2323")

# Supervision pairs for the three labeled edge types of the training split.
aoh_label_index, aoh_edge_label = _split_labels(train_data, ("alarm", "on", "host"))
atb_label_index, atb_edge_label = _split_labels(train_data, ("alarm", "to", "bussiness_tree"))
hbb_label_index, hbb_edge_label = _split_labels(train_data, ("host", "belongsto", "bussiness_tree"))
#


# train_loader
# Mini-batch loader that samples a 2-hop neighborhood (up to 15 neighbors per
# hop) around each supervision edge.
#
# BUG FIX: the original passed edge_label_index as a flat list mixing three
# edge types, and was missing a comma after the second tensor — which made
# Python *call* the tensor with a tuple argument (TypeError at runtime).
# LinkNeighborLoader supervises exactly ONE edge type per instance, given as
# a single (edge_type, indices) pair. We supervise
# ("alarm", "to", "bussiness_tree") here, matching the edge_label that was
# already being passed; build separate loaders for the other edge types if
# they are needed.
train_loader = LinkNeighborLoader(
    data=train_data,
    num_neighbors=[15] * 2,
    # Sample 2 negatives per positive on the fly (the split added none for train).
    neg_sampling_ratio=2.0,
    edge_label_index=(
        ("alarm", "to", "bussiness_tree"),
        atb_label_index.to(torch.long),
    ),
    edge_label=atb_edge_label.to(torch.long),
    batch_size=4,
    shuffle=True,
)

# batch = next(iter(train_loader))
# for batch in train_loader:
#     print("batch...........", batch.x_dict["alarm"],batch.edge_index_dict)
# data = data.to("cuda")
#
#
# class HGT(torch.nn.Module):
#     def __init__(self, hidden_channels, out_channels, num_heads, num_layers):
#         super().__init__()
#
#         self.lin_dict = torch.nn.ModuleDict()
#         for node_type in data.node_types:
#             self.lin_dict[node_type] = Linear(-1, hidden_channels)
#
#         self.convs = torch.nn.ModuleList()
#         for _ in range(num_layers):
#             conv = HGTConv(hidden_channels, hidden_channels, data.metadata(),
#                            num_heads, group='sum')
#             self.convs.append(conv)
#
#         self.lin = Linear(hidden_channels, out_channels)
#
#     def forward(self, x_dict, edge_index_dict):
#         for node_type, x in x_dict.items():
#             # print("node_type", node_type)
#             x_dict[node_type] = self.lin_dict[node_type](x).relu_()
#
#         for conv in self.convs:
#             x_dict = conv(x_dict, edge_index_dict)
#
#         return self.lin(x_dict['alarm'])
#
#
#
#

#
#
#
# # num_neighbors = {key: [15] * 2 for key in data.edge_types}
#
# model = HGT(hidden_channels=32, out_channels=2,
#             num_heads=2, num_layers=2)
# model = model.to("cuda:0")
#
#
#
# optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# with torch.no_grad():  # Initialize lazy modules.
#     for batch in train_loader:
#         optimizer.zero_grad()
#         batch = batch.to('cuda:0')
#         batch_size = batch['alarm'].batch_size
#         out = model(batch.x_dict, batch.edge_index_dict)
#         break
#
#
# def train():
#     model.train()
#     total_examples = total_loss = 0
#     index = 0
#     for batch in train_loader:
#         print("batch...........", batch.x_dict["alarm"],batch.edge_index_dict)
#         # time.sleep(5)
#         optimizer.zero_grad()
#         batch = batch.to('cuda:0')
#         batch_size = batch['alarm'].batch_size
#         out = model(batch.x_dict, batch.edge_index_dict)
#         loss = F.cross_entropy(out[:batch_size],
#                                batch['alarm'].y[:batch_size])
#         loss.backward()
#         optimizer.step()
#
#         total_examples += batch_size
#         total_loss += float(loss) * batch_size
#         index += 0
#
#         if index % 10000 == 0:
#             print("loss", loss.item())
#
#     return total_loss / total_examples
#
#
# for i in range(1000000):
#     print(train())
