from dataset import train_loader,train_data
from model import Model
import torch.nn.functional as F
import torch


# Build the model, adapt it to the training graph's schema, and move it
# to the GPU before constructing the optimizer (so optimizer state is
# created for the CUDA parameters).
model = Model(768)  # 768 = hidden/feature dimension — TODO confirm against Model
model.change_model(train_data)  # presumably adapts layers to the hetero-graph metadata — verify
model = model.to("cuda:0")

# RMSprop replaced an earlier Adam(lr=0.001) experiment; weight_decay
# acts as L2 regularization.
optimizer = torch.optim.RMSprop(
    model.parameters(),
    lr=0.0001,
    alpha=0.99,
    eps=1e-08,
    weight_decay=5e-4,
    momentum=0,
    centered=False,
)

# Train for 1000 epochs. Each batch carries three link-prediction tasks
# ((alarm, on, host), (alarm, to, bussiness_tree), (host, belongsto,
# bussiness_tree)); their BCE-with-logits losses are summed and
# optimized jointly.
for epoch in range(1000):
    model.train()
    for batch in train_loader:
        optimizer.zero_grad()

        # Binary edge labels for each relation, cast to float for
        # binary_cross_entropy_with_logits and moved to the model's
        # device ("cuda:0", matching where the model was placed).
        aoh_edge_label = (
            batch[0][("alarm", "on", "host")].edge_label.to(torch.float).to("cuda:0")
        )
        atb_edge_label = (
            batch[1][("alarm", "to", "bussiness_tree")].edge_label.to(torch.float).to("cuda:0")
        )
        hbb_edge_label = (
            batch[2][("host", "belongsto", "bussiness_tree")].edge_label.to(torch.float).to("cuda:0")
        )

        # Forward pass returns one logit tensor per relation.
        out0, out1, out2 = model(batch)

        loss = (
            F.binary_cross_entropy_with_logits(out0, aoh_edge_label)
            + F.binary_cross_entropy_with_logits(out1, atb_edge_label)
            + F.binary_cross_entropy_with_logits(out2, hbb_edge_label)
        )

        # Fix: retain_graph=True was unnecessary here — the autograd
        # graph is rebuilt by every forward pass, and retaining it kept
        # old graphs alive, steadily growing GPU memory.
        loss.backward()
        optimizer.step()
        print(epoch, loss.item())