import time
import os

from dataset_004 import train_loader, val_data, test_data, origin_data
from model import Model
import torch.nn.functional as F
from fasttext import FTModel
import torch
from create_alarms_data_002 import ALL_ORIGIN_INFOS
from gragh_tools import draw

# Model + optimizer setup. The embedding dimension (300) must match the
# FastText sentence vectors produced by get_sentence_vectors below.
model = Model(300).to("cuda:0")

# Adam with a conservative learning rate.
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

# Best validation loss seen so far; checkpoints are saved when beaten.
best_loss = 1000000.0
# Directory where model checkpoints are written.
MODEL_PATH = "/home/Dyf/code/storage_models/alarms/"
i = 0
def get_sentence_vectors(texts):
    """Embed each text with FastText and stack into one 2-D tensor.

    Args:
        texts: iterable of strings to embed.

    Returns:
        torch.Tensor of shape (len(texts), embedding_dim) — presumably 300-d,
        matching Model(300) above; confirm against FTModel.

    Raises:
        RuntimeError (from torch.cat) if ``texts`` is empty.
    """
    # Comprehension instead of the former append loop (same result, idiomatic).
    embeddings = [FTModel.embedding_text_to_tensor(text).unsqueeze(0) for text in texts]
    return torch.cat(embeddings, dim=0)

def translate_batch(batch):
    """Replace integer node ids in ``batch.x`` with sentence embeddings, on GPU.

    ``batch.x`` is assumed to hold indices into ALL_ORIGIN_INFOS — TODO confirm.
    Each index is mapped to its original text and embedded via
    get_sentence_vectors. The batch is mutated in place, then moved to cuda:0.

    Args:
        batch: a graph batch with an integer ``x`` tensor of node ids.

    Returns:
        The same batch object, with ``x`` replaced by embeddings, on cuda:0.
    """
    # `info_id` rather than `id` — avoid shadowing the builtin.
    texts = [ALL_ORIGIN_INFOS[info_id] for info_id in batch.x.tolist()]
    batch.x = get_sentence_vectors(texts)
    return batch.to("cuda:0")


# Pre-translate the validation split once; it is reused unchanged every epoch.
new_val_data = translate_batch(val_data)

idx = 0
for epoch in range(20000):
    try:
        model.train()
        train_losses = 0.0
        val_losses = 0.0
        correct = 0.0
        total = 0.0
        for batch in train_loader:
            idx += 1
            optimizer.zero_grad()
            batch = translate_batch(batch)
            out = model(batch)
            label = batch.edge_label.to(torch.float).to("cuda")

            # `out` are raw logits (they feed binary_cross_entropy_with_logits
            # below), so the decision boundary is logit >= 0, i.e.
            # sigmoid(out) >= 0.5. The previous torch.round(out) thresholded
            # the logits at 0.5, which is the wrong boundary. Counting is now
            # vectorized instead of materializing Python lists.
            preds = (out >= 0).to(label.dtype)
            correct += (preds == label).sum().item()
            total += label.numel()

            loss = F.binary_cross_entropy_with_logits(out, label)
            loss.backward()
            optimizer.step()
            train_losses += loss.item()

        print("batch {} train_losses {} Percent {}".format(epoch, train_losses, correct / float(total)))

        # Validation: eval() switches off train-mode layers; no_grad() avoids
        # building an autograd graph we never backprop through.
        model.eval()
        with torch.no_grad():
            out = model(new_val_data)
            edge_label = new_val_data.edge_label.to(torch.float).to("cuda")
            v_loss = F.binary_cross_entropy_with_logits(out, edge_label)
        val_losses += v_loss.item()

        print("batch {} val_losses {}".format(epoch, val_losses))
        if val_losses < best_loss:
            best_loss = val_losses
            # NOTE(review): saving the whole module (not state_dict) ties the
            # checkpoint to this exact class definition — consider state_dict.
            torch.save(model, os.path.join(MODEL_PATH, 'Alarms_{}.pth').format(epoch))
            print("=> saved best model", epoch, val_losses)

    except Exception:
        # Deliberate best-effort: keep training through transient batch
        # failures, but always log the traceback.
        import traceback
        traceback.print_exc()

# Final held-out evaluation. Labels are captured before translate_batch
# mutates the batch in place, then moved to the GPU alongside the model.
edge_label = test_data.edge_label.to(torch.float).to("cuda")
new_test_data = translate_batch(test_data)
# Ensure eval mode (the loop above may have exited mid-epoch in train mode)
# and skip autograd bookkeeping for this inference-only pass.
model.eval()
with torch.no_grad():
    out = model(new_test_data)
    t_loss = F.binary_cross_entropy_with_logits(out, edge_label)
print("Test_loss {}".format(t_loss.item()))
# draw(test_data, data)

# data = data.to("cuda")
