import time
import os

from dataset_004 import train_loader, val_data, train_data, test_data, HeteroDataInfo
from model import Model
from fasttext import FTModel
import torch
from create_alarms_data_002 import ORIGIN_DATA
from gragh_tools import draw

# Run on the first GPU when one is available, otherwise on the CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"

best_loss = 1000000.0
MODEL_PATH = "/home/Dyf/code/storage_models/alarms/"
i = 0
# for batch in train_loader:
#     i += 1
#     draw(batch, origin_data, 0, i)
#     break

# To use a weighted loss we must first define the weights: inverse class
# frequency, scaled so the majority class gets weight 1.
use_weighted_loss = True
if use_weighted_loss:
    class_counts = torch.bincount(train_data['alarm', 'btree'].edge_label.to(int))
    weight = class_counts.max() / class_counts
else:
    weight = None


# print("Use weight",weight)
# Define a custom (optionally class-weighted) loss function.
def weighted_mse_loss(pred, target, weight=None):
    """Mean squared error with optional per-class weighting.

    Args:
        pred: predicted scores (float tensor).
        target: ground-truth labels; cast to int so they can index ``weight``.
        weight: optional 1-D tensor of per-class weights indexed by label.
            When ``None``, every element contributes equally.

    Returns:
        Scalar tensor holding the (weighted) mean squared error.
    """
    target = target.to(int)
    if weight is None:
        per_elem_weight = 1.
    else:
        per_elem_weight = weight[target].to(pred.dtype)
    squared_error = (pred - target.to(pred.dtype)).pow(2)
    return (per_elem_weight * squared_error).mean()


def get_sentence_vectors(texts):
    """Embed every text with FastText and stack the results into one tensor.

    Args:
        texts: iterable of strings to embed.

    Returns:
        Tensor with one row per text (presumably ``(len(texts), dim)`` —
        depends on what ``FTModel.embedding_text_to_tensor`` returns).
        NOTE(review): an empty ``texts`` makes ``torch.cat`` raise.
    """
    embedded = [FTModel.embedding_text_to_tensor(text).unsqueeze(0)
                for text in texts]
    return torch.cat(embedded, dim=0)


def translate_batch(batch):
    """Attach text embeddings to a hetero batch and move it to ``device``.

    Looks up the original text for every ``btree``/``alarm`` node id in
    ``ORIGIN_DATA``, embeds the texts, stores them as node features ``x``,
    and transfers the whole batch to the selected device.
    """
    btree_texts = [ORIGIN_DATA.btrees_origin_infos[node_id]
                   for node_id in batch["btree"].node_id.tolist()]
    alarm_texts = [ORIGIN_DATA.alarms_origin_infos[node_id]
                   for node_id in batch["alarm"].node_id.tolist()]
    batch["btree"].x = get_sentence_vectors(btree_texts)
    # "host" nodes deliberately get no features here (the corresponding
    # lookup was disabled in the original code).
    batch["alarm"].x = get_sentence_vectors(alarm_texts)
    return batch.to(device)


# Build the model and move it to the chosen device.
model = Model(300, HeteroDataInfo)
model = model.to(device)
# One warm-up forward pass of the encoder before creating the optimizer —
# NOTE(review): presumably this materializes lazily-initialized module
# parameters so the optimizer sees them; confirm against Model's encoder.
with torch.no_grad():
    for batch in train_loader:
        batch = translate_batch(batch)
        model.encoder(batch.x_dict, batch.edge_index_dict)
        break

# optimizer = torch.optim.RMSprop(model.parameters(), lr=0.001, alpha=0.99, eps=1e-09, weight_decay=1e-3, momentum=0.01,
#                                 centered=False)
# `size_average=False` is long-deprecated; `reduction='sum'` is its modern
# equivalent (summed, not averaged, BCE over all elements).
criterion = torch.nn.BCELoss(reduction='sum')
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
# optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
new_val_data = translate_batch(val_data)

# Main training loop: optimize on mini-batches, then evaluate on the full
# validation split once per epoch and checkpoint whenever val loss improves.
idx = 0
for epoch in range(3000):
    try:
        model.train()
        train_losses = 0.0
        val_losses = 0.0
        true_lens = 0.0
        all_lens = 0.0
        for batch in train_loader:
            idx += 1
            optimizer.zero_grad()
            batch = translate_batch(batch)
            out = model(batch)

            label_train = batch[("alarm", "to", "btree")].edge_label
            label = label_train.to(torch.float).to(device)

            # Count correct binary predictions with tensor ops instead of
            # materializing two Python lists per batch as the original did.
            preds = torch.round(out)
            true_lens += (preds == label).sum().item()
            all_lens += label.numel()

            loss = criterion(out, label)
            loss.backward()
            optimizer.step()
            train_losses += loss.item()
        # Guard the accuracy against an empty loader (division by zero).
        train_acc = true_lens / float(all_lens) if all_lens else 0.0
        print("batch {} train_losses {} Percent {}".format(epoch, train_losses, train_acc))

        # Validation needs no gradients: run under no_grad to avoid building
        # an autograd graph (the original tracked gradients here needlessly).
        model.eval()
        with torch.no_grad():
            out = model(new_val_data)
            val_t_lable = new_val_data[("alarm", "to", "btree")].edge_label
            val_edge_label = val_t_lable.to(torch.float).to(device)
            v_loss = criterion(out, val_edge_label)
            # v_loss = weighted_mse_loss(out, val_edge_label, weight=weight)
            val_losses += v_loss.item()

            val_true_len = (torch.round(out) == val_edge_label).sum().item()
            val_all_len = val_edge_label.numel()
        val_acc = val_true_len / float(val_all_len) if val_all_len else 0.0
        print("batch {} val_losses {} Percent {}".format(epoch, val_losses, val_acc))

        # Checkpoint on improvement; f-string replaces the awkward
        # `os.path.join(...).format(epoch)` (same resulting path).
        if val_losses < best_loss:
            best_loss = val_losses
            torch.save(model, os.path.join(MODEL_PATH, f'Alarms_{epoch}.pth'))
            print("=> saved best model", epoch, val_losses)
    except Exception:
        # Best-effort: log the failure and keep training the next epoch.
        import traceback
        traceback.print_exc()

# for i in predict_label_index.t().tolist():


# print(out.shape)
# draw(test_data, data)

# data = data.to(device)
