import unicodedata
from collections import Counter

import numpy as np

import util


class Extract_GCN_Traindata:
    """Builds a GCN vocabulary and training tensors from dependency-parse data.

    Expected layout of the input .npy file, per sentence ``i`` (inferred from
    the indexing below -- TODO confirm against the data producer):
      data[i][2] -> list of (word, tag) tuples (no root entry)
      data[i][3] -> array whose [0][0] row holds per-token NER ids, with
                    index 0 reserved for the implicit root, and whose [2:]
                    slices are (L, L) dependency-adjacency channels
      data[i][4] -> (L, L) relation-label matrix, indexed [target][source]
    """

    def __init__(self):
        pass

    def build_vocab(self, in_path, out_path):
        """Count (word, ner_id) pairs over the corpus and write them to
        ``out_path`` as tab-separated ``word<TAB>ner<TAB>count`` lines,
        sorted by descending frequency (ties keep first-seen order, matching
        the previous stable-sort behavior).

        Fix: use collections.Counter instead of the two-lookup
        get()/assign pattern; drop the unused enumerate index.
        """
        data = np.load(in_path, allow_pickle=True)
        counts = Counter()
        for sample in data:
            words = sample[2]
            ners = sample[3]
            for j, word_entry in enumerate(words):
                # NER ids are offset by one: row index 0 is the implicit root.
                counts[(word_entry[0], int(ners[0][0][j + 1]))] += 1
        with open(out_path, 'w', encoding='utf-8') as f:
            for (word, ner), count in counts.most_common():
                print(word, ner, count, sep='\t', file=f)

    def read_vocab(self, vocab_path):
        """Load a vocabulary file written by :meth:`build_vocab`.

        Returns a dict mapping (word, ner_id) -> integer node index.
        Index 0 is reserved for the implicit ('root', 0) node; file lines
        are numbered from 1 in file order.
        """
        word_dic = {('root', 0): 0}
        with open(vocab_path, 'r', encoding='utf-8') as f:
            for index, line in enumerate(f, 1):
                fields = line.split('\t')
                # fields[2] (the count, with trailing newline) is unused,
                # so no strip() is needed.
                word_dic[(fields[0], int(fields[1]))] = index
        return word_dic

    def extract_data(self, in_path, out_path, vocab_path=None):
        """Convert parsed sentences into GCN training data and save them.

        Saves an object array [sent_data, node_feature, node_mask] where
          sent_data[i]  = [adjacencies, edge_source_index,
                           edge_target_index, edge_label]
          node_feature  = (vocab_size, 5) one-hot NER features, accumulated
                          over all sentences
          node_mask     = identity matrix over the vocabulary

        ``vocab_path`` defaults to the project-level gcn_vocb.txt, keeping
        the previous hard-coded behavior backward compatible.
        """
        if vocab_path is None:
            vocab_path = util.get_project_root() + '/data/gcn_vocb.txt'
        data = np.load(in_path, allow_pickle=True)
        global_word_dic = self.read_vocab(vocab_path)
        word_count = len(global_word_dic)
        sent_data = []
        # One-hot NER feature per vocabulary node; assumes ner ids < 5 --
        # TODO confirm the NER tag set size.
        node_feature = np.zeros((word_count, 5))
        node_mask = np.eye(word_count)

        for i in range(len(data)):
            words = data[i][2]
            ners = data[i][3][0][0].astype(int)
            dps = data[i][3][2:]
            relations = data[i][4]
            # Prepend the implicit root so word indices line up with ners.
            words.insert(0, ('root', 'head'))

            adjacencies = []
            edge_label = []
            edge_source_index = []
            edge_target_index = []

            sample_node_count = len(words)
            for j in range(sample_node_count):
                index = global_word_dic[(words[j][0], ners[j])]
                node_feature[index][ners[j]] = 1

                if ners[j] != 0:
                    # Emit one labelled edge for every ordered pair of
                    # entity (non-zero NER) nodes.
                    for t in range(sample_node_count):
                        if ners[t] != 0:
                            edge_source_index.append(global_word_dic[(words[j][0], ners[j])])
                            edge_target_index.append(global_word_dic[(words[t][0], ners[t])])
                            edge_label.append(relations[t][j])

            # One sparse adjacency (values, (rows, cols)) per dependency
            # channel, indexed into the global vocabulary.
            for k in range(len(dps)):
                adjacencies_source_index = []
                adjacencies_target_index = []
                adjacencies_value = []
                for m in range(dps.shape[-1]):
                    for n in range(dps.shape[-1]):
                        adjacencies_source_index.append(global_word_dic[(words[n][0], ners[n])])
                        adjacencies_target_index.append(global_word_dic[(words[m][0], ners[m])])
                        adjacencies_value.append(dps[k][m][n])
                adjacencies_index = (adjacencies_source_index, adjacencies_target_index)
                adjacencies.append((adjacencies_value, adjacencies_index))

            # Skip sentences with no entity pairs: they contribute no
            # supervised edges.
            if edge_label:
                sent_data.append([adjacencies, edge_source_index, edge_target_index, edge_label])

        # Fix: pack the ragged [list, 2d-array, 2d-array] triple into an
        # explicit object array -- implicit creation from inhomogeneous data
        # raises ValueError on NumPy >= 1.24.
        packed = np.empty(3, dtype=object)
        packed[0], packed[1], packed[2] = sent_data, node_feature, node_mask
        np.save(out_path, packed)


if __name__ == '__main__':
    # Regenerate the vocabulary, then the GCN training tensors, from the
    # dependency-parse training data under the project data directory.
    data_dir = util.get_project_root() + '/data'
    train_source = data_dir + '/dp_graph_train_data.npy'
    vocab_file = data_dir + '/gcn_vocb.txt'

    extractor = Extract_GCN_Traindata()
    extractor.build_vocab(train_source, vocab_file)
    extractor.extract_data(train_source, data_dir + '/gcn_train_data.npy')

# extract_gcn_traindata('{}/data/dp_graph_train_data.npy'.format(util.get_project_root()),'graph_vocab.txt')