import os
import pandas as pd
import numpy as np
import random

class KnowledgeGraph:
    """Load a knowledge-graph dataset and serve training batches.

    Reads entity/relation id dictionaries and train/valid/test triple
    files from ``data_dir``.  Triples are stored as ``(head_id, tail_id,
    relation_id)`` integer tuples.  When ``has_text`` is True, a word
    dictionary and per-entity word sequences are also loaded; sequences
    are right-padded with the ``<PAD>`` id to a common maximum length.

    Expected files (tab-separated, no header):
      entity2id.txt, relation2id.txt, train.txt, valid.txt, test.txt,
      and (text mode) word2id.txt, entity_words.txt.
    """

    def __init__(self, data_dir='data/FB15k/', has_text=False):
        self.has_text = has_text
        self.data_dir = data_dir
        self.entity_dict = {}        # entity name -> int id
        self.relation_dict = {}      # relation name -> int id
        self.entity_list = []        # all entity ids (for negative sampling)
        self.entity_number = 0
        self.relation_number = 0
        self.training_triples = []   # list of triples in the form of (h, t, r)
        self.validation_triples = []
        self.test_triples = []
        self.training_triple_number = 0
        self.validation_triple_number = 0
        self.test_triple_number = 0
        '''load dicts and triples'''
        self.load_dict_files()
        self.load_triple_files()
        '''construct pools after loading'''
        # Set of training triples for O(1) membership tests during
        # negative sampling; all_triples additionally covers valid/test.
        self.training_triple_pool = set(self.training_triples)
        self.all_triples = set(self.training_triples) | set(self.validation_triples) | set(self.test_triples)
        if self.has_text:
            self.word_dic = {}       # word -> int id (must contain <PAD> and <UNK>)
            self.word_num = 0
            self.entity_sequence_list = []  # per-entity padded word-id lists, indexed by entity id
            self.sequence_max_length = 0
            self.load_word_dic_files()
            self.load_entity_sequence_files()

    def load_word_dic_files(self):
        """Load the word -> id dictionary from word2id.txt."""
        word_dic_file = 'word2id.txt'
        print('-----Loading word dict-----')
        word_df = pd.read_table(os.path.join(self.data_dir, word_dic_file), header=None)
        self.word_dic = dict(zip(word_df[0], word_df[1]))
        self.word_num = len(self.word_dic)
        print('#word number:{}'.format(self.word_num))

    def load_entity_sequence_files(self):
        """Load per-entity word sequences and pad them to a common length.

        Entities absent from entity_words.txt keep the placeholder
        sequence ``[0]`` (word id 0) before padding.
        """
        entity_words_file = 'entity_words.txt'
        entity_words_df = pd.read_table(os.path.join(self.data_dir, entity_words_file), header=None)
        # One independent [0] per slot; the original [[0]] * n aliased a
        # single shared list across every entity.
        _entity_sequence_list = [[0] for _ in range(self.entity_number)]
        for row in entity_words_df.values:
            # row layout: entity name, (unused column), space-separated words
            # NOTE(review): assumes 3+ columns with words in column 2 — confirm file format.
            _entity_sequence_list[self.entity_dict[row[0]]] = self.sequence2ids(row[2].strip())
        self.sequence_max_length = max(map(len, _entity_sequence_list))
        print('#sequence max length:{}'.format(self.sequence_max_length))
        for e in _entity_sequence_list:
            self.entity_sequence_list.append(self.sequence_id_pad(e, self.sequence_max_length))

    def sequence2ids(self, sequence, sep=' '):
        """Map a token string to a list of word ids, using <UNK> for OOV tokens."""
        unk_id = self.word_dic['<UNK>']
        return [self.word_dic.get(token, unk_id) for token in sequence.split(sep)]

    def sequence_id_pad(self, seq, length, padstr='<PAD>'):
        """Return ``seq`` right-padded with the <PAD> word id up to ``length``.

        Always returns a new list (never mutates ``seq``).  Parameters were
        renamed from ``list``/``lenth`` to stop shadowing the builtin; all
        in-file callers pass positionally.
        """
        return seq + [self.word_dic[padstr]] * (length - len(seq))

    def load_dict_files(self):
        """Load entity and relation name -> id dictionaries."""
        entity_dict_file = 'entity2id.txt'
        relation_dict_file = 'relation2id.txt'
        print('-----Loading entity dict-----')
        entity_df = pd.read_table(os.path.join(self.data_dir, entity_dict_file), header=None)
        self.entity_dict = dict(zip(entity_df[0], entity_df[1]))
        self.entity_number = len(self.entity_dict)
        self.entity_list = list(self.entity_dict.values())
        print('#entity number: {}'.format(self.entity_number))
        print('-----Loading relation dict-----')
        relation_df = pd.read_table(os.path.join(self.data_dir, relation_dict_file), header=None)
        self.relation_dict = dict(zip(relation_df[0], relation_df[1]))
        self.relation_number = len(self.relation_dict)
        print('#relation number: {}'.format(self.relation_number))

    def _read_triples(self, filename):
        """Read one tab-separated (head, tail, relation) file and map the
        names to integer ids via the entity/relation dictionaries."""
        df = pd.read_table(os.path.join(self.data_dir, filename), header=None)
        return list(zip([self.entity_dict[h] for h in df[0]],
                        [self.entity_dict[t] for t in df[1]],
                        [self.relation_dict[r] for r in df[2]]))

    def load_triple_files(self):
        """Load the train/valid/test triple splits (dicts must be loaded first)."""
        print('-----Loading training triples-----')
        self.training_triples = self._read_triples('train.txt')
        self.training_triple_number = len(self.training_triples)
        print('#training triple number: {}'.format(self.training_triple_number))
        print('-----Loading validation triples-----')
        self.validation_triples = self._read_triples('valid.txt')
        self.validation_triple_number = len(self.validation_triples)
        print('#validation triple number: {}'.format(self.validation_triple_number))
        print('-----Loading test triples------')
        self.test_triples = self._read_triples('test.txt')
        self.test_triple_number = len(self.test_triples)
        print('#test triple number: {}'.format(self.test_triple_number))

    def get_all_word_ids(self):
        """Return a padded copy of every entity's word-id sequence.

        Sequences are already padded to ``sequence_max_length``, so the
        pad call is a no-op that yields fresh list copies.
        """
        all_word_ids = []
        for e in self.entity_sequence_list:
            all_word_ids.append(self.sequence_id_pad(e, self.sequence_max_length))
        return all_word_ids

    def next_raw_batch(self, batch_size):
        """Yield the training triples in shuffled batches of ``batch_size``.

        The final batch may be smaller; together the batches cover every
        training triple exactly once per epoch.
        """
        rand_idx = np.random.permutation(self.training_triple_number)
        start = 0
        while start < self.training_triple_number:
            end = min(start + batch_size, self.training_triple_number)
            yield [self.training_triples[i] for i in rand_idx[start:end]]
            start = end

    def generate_training_batch(self, in_queue, out_queue):
        """Worker loop: read positive batches from ``in_queue``, attach
        negative samples (and word ids in text mode), and push the result
        to ``out_queue``.  A ``None`` item on ``in_queue`` terminates it.

        Negative triples corrupt either the head or the tail (chosen once
        per batch with probability 0.5) and are resampled until they fall
        outside the training-triple pool.
        """
        while True:
            raw_batch = in_queue.get()
            if raw_batch is None:
                return
            batch_pos = raw_batch
            batch_neg = []
            corrupt_head_prob = np.random.binomial(1, 0.5)  # get random result: 0 or 1
            # create batch_neg
            for head, tail, relation in batch_pos:
                head_neg = head
                tail_neg = tail
                while True:
                    if corrupt_head_prob:
                        head_neg = random.choice(self.entity_list)
                    else:
                        tail_neg = random.choice(self.entity_list)
                    if (head_neg, tail_neg, relation) not in self.training_triple_pool:
                        break
                batch_neg.append((head_neg, tail_neg, relation))
            if not self.has_text:
                out_queue.put((batch_pos, batch_neg))
            else:
                batch_word_ids_pos = self.get_batch_sequence_id(batch_pos)
                batch_word_ids_neg = self.get_batch_sequence_id(batch_neg)
                out_queue.put((batch_pos, batch_neg, batch_word_ids_pos, batch_word_ids_neg))

    def get_batch_sequence_id(self, batch):
        """Return [(head_word_ids, tail_word_ids), ...] for a triple batch.

        Materialized as a list (the original returned a single-use ``zip``
        iterator, which is unsafe once handed to queue consumers).
        """
        head_list = []
        tail_list = []
        for item_batch in batch:
            head_list.append(self.entity_sequence_list[item_batch[0]])
            tail_list.append(self.entity_sequence_list[item_batch[1]])
        return list(zip(head_list, tail_list))