from os import path as os_path
import json
from numpy import loadtxt
from torch import load as torch_load
from config import BasicConfig
from embedding_model.dataloader import UpdateDataIterator, UpdateDataset, UpdateBidirectionalIterator
from embedding_model.dataloader import TrainDataset, BidirectionalOneShotIterator
from embedding_model.run import run, set_logger
from torch.utils.data import DataLoader


class updateModel(object):
    """Incrementally maintain a knowledge graph on disk and its embeddings.

    New triples are mapped to ids (unknown entities/relations are assigned
    fresh ids), appended to the on-disk training files, and cached in an
    in-memory head -> relation -> [tails] adjacency dict.  Every
    ``global_update_times`` ingestion calls a full *global* re-training runs;
    otherwise, once ``local_update_threshold`` novel triples accumulate, a
    cheap *local* update runs over just those triples.
    """

    def __init__(self, dataset, data_path, log_path, save_path, local_update_threshold, global_update_times,
                 embedding_model):
        self.dataset = dataset
        self.data_path = data_path
        self.local_update_threshold = local_update_threshold
        self.log_path = log_path
        self.save_path = save_path
        self.global_update_times = global_update_times
        self.embedding_model = embedding_model
        # Ingestion calls since the last global re-training.
        self.embed_current_times = 0

        # Local updates: lower learning rate, fewer steps (cheap refresh).
        self.embed_local_args = self._make_args(learning_rate=0.0001, max_steps=10)
        # Global updates: higher learning rate, more steps (fuller refresh).
        self.embed_global_args = self._make_args(learning_rate=0.0005, max_steps=20)

        self.logger = set_logger(self.embed_local_args, "{}_train".format(self.embed_local_args.model))

        self.record_path = os_path.join("..", "record", self.dataset)
        self.result_path = os_path.join("..", "result", self.dataset)

        info = self._load_json(os_path.join(self.data_path, "info.json"))
        self.ent_num = info['n_entity']
        self.rel_num = info['n_relation']

        self.entity_id_dict = self._load_json(os_path.join(self.data_path, "ent_text2id.dict"))
        self.relation_id_dict = self._load_json(os_path.join(self.data_path, "rel_text2id.dict"))

        # info.json must agree with the id dictionaries.
        assert self.ent_num == len(self.entity_id_dict)
        assert self.rel_num == len(self.relation_id_dict)

        self.triple_ids = self._load_train_triples(cast=list)

        edge_dict_path = os_path.join(self.record_path, "entity_edge_h_dict.json")
        if os_path.exists(edge_dict_path):
            self.entity_edge_h_dict = self._load_json(edge_dict_path)
        else:
            self.build_entity_edge_h_dict()

        # Novel triples (as id lists) accumulated since the last local update.
        self.valuable_triples = []
        valuable_path = os_path.join(self.record_path, "valuable_triples.txt")
        if os_path.exists(valuable_path):
            with open(valuable_path, "r", encoding="utf-8") as fin:
                for line in fin:
                    self.valuable_triples.append([int(elem) for elem in line.strip().split("\t")])

    def _make_args(self, learning_rate, max_steps):
        """Build a BasicConfig preset for an incremental-update training run."""
        args = BasicConfig(data_path=self.data_path, log_path=self.log_path,
                           save_path=self.save_path, dataset=self.dataset)
        args.learning_rate = learning_rate
        args.min_learning_rate = 0.000005
        args.batch_size = 64
        args.negative_sample_size = 32
        args.epoch_num = 1
        args.max_steps = max_steps
        args.log_steps = 5
        args.do_test = False
        args.continue_train = True
        args.model = self.embedding_model
        return args

    @staticmethod
    def _load_json(file_path):
        """Load a UTF-8 JSON file and return the parsed object (file is closed)."""
        with open(file_path, "r", encoding="utf-8") as fin:
            return json.load(fin)

    def _load_train_triples(self, cast=tuple):
        """Read train_id.txt into a list of ``cast``-wrapped (h, r, t) int triples."""
        triples = []
        with open(os_path.join(self.data_path, "train_id.txt"), "r", encoding="utf-8") as fin:
            for line in fin:
                triples.append(cast(int(elem) for elem in line.strip().split("\t")))
        return triples

    def build_entity_edge_h_dict(self):
        """Build the head -> relation -> [tails] adjacency dict from train_id.txt."""
        self.entity_edge_h_dict = {}
        # ndmin=2 keeps the result 2-D even when the file holds a single triple.
        train_triples = loadtxt(os_path.join(self.data_path, "train_id.txt"), dtype=int, ndmin=2)
        for head, relation, tail in train_triples:
            edges = self.entity_edge_h_dict.setdefault(str(head), {})
            edges.setdefault(str(relation), []).append(int(tail))

    def updateGraphEmbedding(self, triples):
        """Ingest raw (head, relation, tail) triples and refresh embeddings.

        Novel triples are persisted via updateFiles; a global update runs
        every ``global_update_times`` calls, otherwise a local update runs
        once enough novel triples have accumulated.
        """
        self.embed_current_times += 1
        triples_ids = self.map_triple2id(triples)
        # Keep only triples that are not already in the graph.
        valuable_triples, valuable_triples_ids = [], []
        for idx, triple in enumerate(triples_ids):
            if not self.tripleIsExist(triple):
                valuable_triples.append(triples[idx])
                valuable_triples_ids.append(triple)

        if valuable_triples_ids:
            self.updateFiles(valuable_triples, valuable_triples_ids)

        # Global update takes priority over a local one.
        if self.global_update_times <= self.embed_current_times:
            self.updateGlobalEmbeddings()
            self.embed_current_times = 0

        elif len(self.valuable_triples) >= self.local_update_threshold:
            self.updateLocalEmbeddings()
            self.valuable_triples = []

    def updateKnowledgeGraphByRules(self, new_triples_str):
        """Persist rule-derived triples without triggering any embedding update."""
        triple_ids = self.map_triple2id(new_triples_str)
        valuable_triples, valuable_triples_ids = [], []
        for idx, triple in enumerate(triple_ids):
            if not self.tripleIsExist(triple):
                valuable_triples.append(new_triples_str[idx])
                valuable_triples_ids.append(triple)

        if valuable_triples_ids:
            self.updateFiles(valuable_triples, valuable_triples_ids)

    def map_triple2id(self, triples):
        """Map raw triples to [head_id, relation_id, tail_id] lists.

        Unknown entities/relations are assigned the next free id and added
        to the in-memory dictionaries (persisted later by updateFiles).
        """
        triple_ids = []
        for head, relation, tail in triples:
            # setdefault computes len() before inserting, so a new key gets
            # the next sequential id — same scheme as the original try/except.
            head_id = int(self.entity_id_dict.setdefault(str(head), len(self.entity_id_dict)))
            tail_id = int(self.entity_id_dict.setdefault(str(tail), len(self.entity_id_dict)))
            relation_id = int(self.relation_id_dict.setdefault(str(relation), len(self.relation_id_dict)))
            triple_ids.append([head_id, relation_id, tail_id])
        return triple_ids

    def tripleIsExist(self, triple):
        """Return True iff the id triple (h, r, t) is already in the adjacency dict."""
        tails = self.entity_edge_h_dict.get(str(triple[0]), {}).get(str(triple[1]), [])
        return int(triple[2]) in tails

    def updateFiles(self, triples, triple_ids):
        """Append novel triples to every on-disk artifact and refresh caches.

        ``triples`` are the raw triples; ``triple_ids`` the matching
        [head_id, relation_id, tail_id] lists.
        """
        self.triple_ids += triple_ids
        self.valuable_triples += triple_ids

        info = {"n_entity": len(self.entity_id_dict), "n_relation": len(self.relation_id_dict)}
        with open(os_path.join(self.data_path, "info.json"), "w", encoding="utf-8") as fout:
            json.dump(info, fout)

        with open(os_path.join(self.data_path, "train_id.txt"), "a+", encoding="utf-8") as fout:
            for triple in triple_ids:
                fout.write("\t".join(str(elem) for elem in triple) + "\n")

        self.ent_num, self.rel_num = len(self.entity_id_dict), len(self.relation_id_dict)

        # AMIE input uses "<id>" tokens; relation ids are offset by the entity
        # count so entities and relations share a single id space.
        with open(os_path.join(self.data_path, "amie", "triples.txt"), 'w', encoding="utf-8") as fout:
            for triple in self.triple_ids:
                fout.write("\t".join(['<' + str(triple[0]) + '>', '<' + str(triple[1] + self.ent_num) + '>',
                                      '<' + str(triple[2]) + '>']) + "\n")

        with open(os_path.join(self.record_path, "valuable_triples.txt"), "a+", encoding="utf-8") as fout:
            for triple in triple_ids:
                fout.write("\t".join(str(elem) for elem in triple) + "\n")

        for head_id, relation_id, tail_id in triple_ids:
            edges = self.entity_edge_h_dict.setdefault(str(head_id), {})
            edges.setdefault(str(relation_id), []).append(int(tail_id))

        # BUG FIX: the original dumped the whole adjacency dict once per
        # triple *inside* the loop; a single dump after the loop produces the
        # same final file with one write instead of len(triple_ids) rewrites.
        if triple_ids:
            with open(os_path.join(self.record_path, "entity_edge_h_dict.json"), "w", encoding="utf-8") as fout:
                json.dump(self.entity_edge_h_dict, fout)

    def updateLocalEmbeddings(self):
        """Run a short training pass focused on the recently added triples."""
        all_triples = self._load_train_triples()
        n_entity = self._load_json(os_path.join(self.data_path, "info.json"))["n_entity"]

        head_dataset = UpdateDataset(all_triples, n_entity, self.embed_local_args.negative_sample_size, "head-batch")
        tail_dataset = UpdateDataset(all_triples, n_entity, self.embed_local_args.negative_sample_size, "tail-batch")

        worker_num = max(self.embed_local_args.cpu_num // 2, 1)
        head_data_loader = UpdateDataIterator(
            head_dataset,
            self.valuable_triples,
            worker_num,
            UpdateDataset.collate_fn
        )
        tail_data_loader = UpdateDataIterator(
            tail_dataset,
            self.valuable_triples,
            worker_num,
            UpdateDataset.collate_fn
        )
        train_iterator = UpdateBidirectionalIterator(head_data_loader, tail_data_loader)
        self.logger.info("Start Local Update...")
        run(self.embed_local_args, self.logger, train_iterator)

    def updateGlobalEmbeddings(self):
        """Re-train the embeddings over the complete training set."""
        all_triples = self._load_train_triples()

        info = self._load_json(os_path.join(self.data_path, "info.json"))
        n_entity = info["n_entity"]
        n_relation = info["n_relation"]
        self.embed_global_args.nentity = n_entity
        self.embed_global_args.nrelation = n_relation

        head_dataset = TrainDataset(all_triples, n_entity, n_relation, self.embed_global_args.negative_sample_size, "head-batch")
        tail_dataset = TrainDataset(all_triples, n_entity, n_relation, self.embed_global_args.negative_sample_size, "tail-batch")

        worker_num = max(self.embed_global_args.cpu_num // 2, 1)
        head_data_loader = DataLoader(
            head_dataset,
            batch_size=self.embed_global_args.batch_size,
            shuffle=True,
            num_workers=worker_num,
            collate_fn=TrainDataset.collate_fn
        )
        tail_data_loader = DataLoader(
            tail_dataset,
            batch_size=self.embed_global_args.batch_size,
            shuffle=True,
            num_workers=worker_num,
            collate_fn=TrainDataset.collate_fn
        )
        train_iterator = BidirectionalOneShotIterator(head_data_loader, tail_data_loader)
        self.logger.info("Start Global Update...")
        run(self.embed_global_args, self.logger, train_iterator)

    def load_model(self, args):
        """Load the serialized model from ``save_path`` (``args`` is unused)."""
        model = torch_load(os_path.join(self.save_path, "model"))
        return model


class updateKnowledgeGraph(object):
    """Incrementally maintain KG training files and retrain embeddings.

    Unlike updateModel, this variant maps triples through the *existing*
    id dictionaries only (unknown entities/relations raise KeyError).
    A global re-training runs every ``global_update_times`` update() calls;
    otherwise a local update runs once ``local_update_threshold`` novel
    triples have accumulated.
    """

    def __init__(self, dataset, data_path, log_path, save_path, local_update_threshold, global_update_times,
                 embedding_model):
        self.dataset = dataset
        self.data_path = data_path
        self.local_update_threshold = local_update_threshold
        self.log_path = log_path
        self.save_path = save_path
        self.global_update_times = global_update_times
        self.embedding_model = embedding_model
        # update() calls since the last global re-training.
        self.embed_current_times = 0

        self.record_path = os_path.join("..", "record", self.dataset)
        self.result_path = os_path.join("..", "result", self.dataset)

        self.entity_id_dict = self._load_json(os_path.join(self.data_path, "ent_text2id.dict"))
        self.relation_id_dict = self._load_json(os_path.join(self.data_path, "rel_text2id.dict"))

        edge_dict_path = os_path.join(self.record_path, "entity_edge_h_dict.json")
        if os_path.exists(edge_dict_path):
            self.entity_edge_h_dict = self._load_json(edge_dict_path)
        else:
            self.build_entity_edge_h_dict()

        # Novel triples (as id lists) accumulated since the last local update.
        self.valuable_triples = []
        valuable_path = os_path.join(self.record_path, "valuable_triples.txt")
        if os_path.exists(valuable_path):
            with open(valuable_path, "r", encoding="utf-8") as fin:
                for line in fin:
                    self.valuable_triples.append([int(elem) for elem in line.strip().split("\t")])

    @staticmethod
    def _load_json(file_path):
        """Load a UTF-8 JSON file and return the parsed object (file is closed)."""
        with open(file_path, "r", encoding="utf-8") as fin:
            return json.load(fin)

    def _load_train_triples(self):
        """Read train_id.txt into a list of (h, r, t) int tuples."""
        triples = []
        with open(os_path.join(self.data_path, "train_id.txt"), "r", encoding="utf-8") as fin:
            for line in fin:
                triples.append(tuple(int(elem) for elem in line.strip().split("\t")))
        return triples

    def _make_args(self, learning_rate, max_steps):
        """Build a BasicConfig preset for an incremental-update training run."""
        args = BasicConfig(data_path=self.data_path, log_path=self.log_path, save_path=self.save_path,
                           dataset=self.dataset)
        args.learning_rate = learning_rate
        args.min_learning_rate = 0.000005
        args.batch_size = 64
        args.negative_sample_size = 32
        args.epoch_num = 1
        args.max_steps = max_steps
        args.log_steps = 5
        args.do_test = False
        args.continue_train = True
        args.model = self.embedding_model
        return args

    def build_entity_edge_h_dict(self):
        """Build the head -> relation -> [tails] adjacency dict from train_id.txt."""
        self.entity_edge_h_dict = {}
        # ndmin=2 keeps the result 2-D even when the file holds a single triple.
        train_triples = loadtxt(os_path.join(self.data_path, "train_id.txt"), dtype=int, ndmin=2)
        for head, relation, tail in train_triples:
            edges = self.entity_edge_h_dict.setdefault(str(head), {})
            edges.setdefault(str(relation), []).append(int(tail))

    def update(self, triples):
        """Ingest raw (head, relation, tail) triples and refresh embeddings."""
        self.embed_current_times += 1
        triples_ids = self.map_triple2id(triples)
        # Keep only triples that are not already in the graph.
        valuable_triples, valuable_triples_ids = [], []
        for idx, triple in enumerate(triples_ids):
            if not self.tripleIsExist(triple):
                valuable_triples.append(triples[idx])
                valuable_triples_ids.append(triple)

        self.updateFiles(valuable_triples, valuable_triples_ids)

        # Global update takes priority over a local one.
        if self.global_update_times <= self.embed_current_times:
            self.updateGlobalEmbeddings()
            self.embed_current_times = 0

        elif len(self.valuable_triples) >= self.local_update_threshold:
            self.updateLocalEmbeddings()
            self.valuable_triples = []

    def map_triple2id(self, triples):
        """Map raw triples to [head_id, relation_id, tail_id] lists.

        Raises KeyError for entities/relations absent from the dictionaries
        (this class never allocates new ids).
        """
        triple_ids = []
        for head, relation, tail in triples:
            head_id = int(self.entity_id_dict[str(head)])
            tail_id = int(self.entity_id_dict[str(tail)])
            relation_id = int(self.relation_id_dict[str(relation)])
            triple_ids.append([head_id, relation_id, tail_id])
        return triple_ids

    def tripleIsExist(self, triple):
        """Return True iff the id triple (h, r, t) is already in the adjacency dict."""
        tails = self.entity_edge_h_dict.get(str(triple[0]), {}).get(str(triple[1]), [])
        return int(triple[2]) in tails

    def updateFiles(self, triples, triple_ids):
        """Append novel triples to the on-disk files and refresh the adjacency cache."""
        self.valuable_triples += triple_ids

        with open(os_path.join(self.data_path, "train_id.txt"), "a+", encoding="utf-8") as fout:
            for triple in triple_ids:
                fout.write("\t".join(str(elem) for elem in triple) + "\n")

        with open(os_path.join(self.record_path, "valuable_triples.txt"), "a+", encoding="utf-8") as fout:
            for triple in triple_ids:
                fout.write("\t".join(str(elem) for elem in triple) + "\n")

        for head_id, relation_id, tail_id in triple_ids:
            edges = self.entity_edge_h_dict.setdefault(str(head_id), {})
            edges.setdefault(str(relation_id), []).append(int(tail_id))

        # BUG FIX: the original dumped the whole adjacency dict once per
        # triple *inside* the loop; one dump after the loop is equivalent
        # and avoids len(triple_ids) full-file rewrites.
        if triple_ids:
            with open(os_path.join(self.record_path, "entity_edge_h_dict.json"), "w", encoding="utf-8") as fout:
                json.dump(self.entity_edge_h_dict, fout)

    def updateLocalEmbeddings(self):
        """Run a short training pass focused on the recently added triples."""
        args = self._make_args(learning_rate=0.0001, max_steps=10)

        all_triples = self._load_train_triples()
        n_entity = self._load_json(os_path.join(self.data_path, "info.json"))["n_entity"]

        head_dataset = UpdateDataset(all_triples, n_entity, args.negative_sample_size, "head-batch")
        tail_dataset = UpdateDataset(all_triples, n_entity, args.negative_sample_size, "tail-batch")

        worker_num = max(args.cpu_num // 2, 1)
        head_data_loader = UpdateDataIterator(
            head_dataset,
            self.valuable_triples,
            worker_num,
            UpdateDataset.collate_fn
        )
        tail_data_loader = UpdateDataIterator(
            tail_dataset,
            self.valuable_triples,
            worker_num,
            UpdateDataset.collate_fn
        )
        train_iterator = UpdateBidirectionalIterator(head_data_loader, tail_data_loader)
        logger = set_logger(args, "local_{}_train".format(args.model))
        print("Local Update...")
        run(args, logger, train_iterator)

    def updateGlobalEmbeddings(self):
        """Re-train the embeddings over the complete training set."""
        args = self._make_args(learning_rate=0.0005, max_steps=20)

        all_triples = self._load_train_triples()

        info = self._load_json(os_path.join(self.data_path, "info.json"))
        n_entity = info["n_entity"]
        n_relation = info["n_relation"]
        args.nentity = n_entity
        args.nrelation = n_relation

        head_dataset = TrainDataset(all_triples, n_entity, n_relation, args.negative_sample_size, "head-batch")
        tail_dataset = TrainDataset(all_triples, n_entity, n_relation, args.negative_sample_size, "tail-batch")

        worker_num = max(args.cpu_num // 2, 1)
        head_data_loader = DataLoader(
            head_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=worker_num,
            collate_fn=TrainDataset.collate_fn
        )
        tail_data_loader = DataLoader(
            tail_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=worker_num,
            collate_fn=TrainDataset.collate_fn
        )
        train_iterator = BidirectionalOneShotIterator(head_data_loader, tail_data_loader)
        logger = set_logger(args, "global_{}_train".format(args.model))
        print("Global Update...")
        run(args, logger, train_iterator)

    def load_model(self, args):
        """Load the serialized model from ``save_path`` (``args`` is unused)."""
        model = torch_load(os_path.join(self.save_path, "model"))
        return model

class updateKnowledgeGraphByRules(object):
    """Persist rule-derived triples to the KG files without retraining.

    Appends id triples to train_id.txt and the AMIE triples file and keeps
    the head -> relation -> [tails] adjacency dict in sync on disk.
    """

    def __init__(self, dataset, data_path):
        self.dataset = dataset
        self.data_path = data_path

        self.record_path = os_path.join("..", "record", self.dataset)
        self.result_path = os_path.join("..", "result", self.dataset)

        self.ent_num = self._load_json(os_path.join(self.data_path, "info.json"))['n_entity']

        self.entity_id_dict = self._load_json(os_path.join(self.data_path, "ent_text2id.dict"))
        self.relation_id_dict = self._load_json(os_path.join(self.data_path, "rel_text2id.dict"))

        edge_dict_path = os_path.join(self.record_path, "entity_edge_h_dict.json")
        if os_path.exists(edge_dict_path):
            self.entity_edge_h_dict = self._load_json(edge_dict_path)
        else:
            self.build_entity_edge_h_dict()

    @staticmethod
    def _load_json(file_path):
        """Load a UTF-8 JSON file and return the parsed object (file is closed)."""
        with open(file_path, "r", encoding="utf-8") as fin:
            return json.load(fin)

    def build_entity_edge_h_dict(self):
        """Build the head -> relation -> [tails] adjacency dict from train_id.txt."""
        self.entity_edge_h_dict = {}
        # ndmin=2 keeps the result 2-D even when the file holds a single triple.
        train_triples = loadtxt(os_path.join(self.data_path, "train_id.txt"), dtype=int, ndmin=2)
        for head, relation, tail in train_triples:
            edges = self.entity_edge_h_dict.setdefault(str(head), {})
            edges.setdefault(str(relation), []).append(int(tail))

    def update(self, new_triples_str):
        """Map raw triples to ids and persist them to the on-disk files."""
        triple_ids = self.map_triple2id(new_triples_str)
        self.updateFiles(triple_ids)

    def updateFiles(self, triple_ids):
        """Append id triples to train/AMIE files and refresh the adjacency cache."""
        with open(os_path.join(self.data_path, "train_id.txt"), "a+", encoding="utf-8") as fout:
            for triple in triple_ids:
                fout.write("\t".join(str(elem) for elem in triple) + "\n")

        # AMIE input uses "<id>" tokens; relation ids are offset by the entity
        # count so entities and relations share a single id space.
        with open(os_path.join(self.data_path, "amie", "triples.txt"), "a+", encoding="utf8") as fout:
            for triple in triple_ids:
                fout.write("\t".join(['<' + str(triple[0]) + '>', '<' + str(triple[1] + self.ent_num) + '>',
                                      '<' + str(triple[2]) + '>']) + "\n")

        for head_id, relation_id, tail_id in triple_ids:
            edges = self.entity_edge_h_dict.setdefault(str(head_id), {})
            edges.setdefault(str(relation_id), []).append(int(tail_id))

        # BUG FIX: the original dumped the whole adjacency dict once per
        # triple *inside* the loop; one dump after the loop is equivalent
        # and avoids len(triple_ids) full-file rewrites.
        if triple_ids:
            with open(os_path.join(self.record_path, "entity_edge_h_dict.json"), "w", encoding="utf-8") as fout:
                json.dump(self.entity_edge_h_dict, fout)

    def map_triple2id(self, triples):
        """Map raw triples to [head_id, relation_id, tail_id] lists.

        Raises KeyError for entities/relations absent from the dictionaries.
        """
        triple_ids = []
        for head, relation, tail in triples:
            head_id = int(self.entity_id_dict[str(head)])
            tail_id = int(self.entity_id_dict[str(tail)])
            relation_id = int(self.relation_id_dict[str(relation)])
            triple_ids.append([head_id, relation_id, tail_id])
        return triple_ids

def find(lst, elem):
    """Return (indices where value == elem, indices where value == -elem).

    Values and ``elem`` are compared after conversion to int, so string
    inputs such as "-2" work as well.
    """
    target = int(elem)
    matches, anti_matches = [], []
    for index, raw in enumerate(lst):
        value = int(raw)
        if value == target:
            matches.append(index)
        elif value == -target:
            anti_matches.append(index)
    return matches, anti_matches


if __name__ == "__main__":
    dataset = "FB15k"
    data_path = os_path.join("..", "data", dataset)
    use_delta = True
    # "delta" sub-directories keep incremental-update artifacts separate
    # from full-training artifacts.
    log_path = os_path.join("..", "record", dataset, "delta") if use_delta else os_path.join("..", "record", dataset)
    save_path = os_path.join("..", "model", dataset, "delta") if use_delta else os_path.join("..", "model", dataset)
    local_update_threshold = 20
    global_update_times = 4
    # BUG FIX: updateKnowledgeGraph requires an embedding_model argument;
    # the original call omitted it and raised a TypeError at startup.
    # "TransE" assumed as the default model name — TODO confirm against config.
    model = updateKnowledgeGraph(dataset, data_path, log_path, save_path,
                                 local_update_threshold, global_update_times, "TransE")

    for i in range(3):
        triples = [(j + i * 20, j + 1 + i * 20, j + 2 + i * 20) for j in range(10)]
        model.update(triples)