# coding: utf-8
from itertools import chain  # 迭代器
from collections import Counter
from torch.utils.data import Dataset, DataLoader
from config import *

config = Config()
# Build the relation -> id lookup table.
# Each line of the relation file is expected to be "<relation_name> <index>".
relation2id = {}
with open(config.relation_file_path, 'r', encoding='utf-8') as fr:
    for line in fr:
        relation, index = line.rstrip().split()
        relation2id[relation] = int(index)


# print(f"relation dictionary --->{relation2id}")


def get_data(file_path):
    """Parse a relation-extraction corpus file.

    Each line is expected to be "entity1 entity2 relation sentence"
    (space-separated, sentence may itself contain spaces).

    Returns:
        datas: list of sentences, each split into a list of characters.
        labels: list of relation ids (ints from relation2id).
        positionE1: per-sentence list of signed char offsets to entity 1.
        positionE2: per-sentence list of signed char offsets to entity 2.
        entities: list of [entity1, entity2] pairs.
    """
    datas = []       # raw sentences, character-split
    labels = []      # relation ids
    positionE1 = []  # offsets relative to the subject entity
    positionE2 = []  # offsets relative to the object entity
    entities = []    # entity pairs
    # Per-relation counter, used below to cap samples for class balance.
    count_dict = {key: 0 for key in relation2id}
    with open(file_path, 'r', encoding='utf-8') as fr:
        for line in fr:
            line_list = line.rstrip().split(' ', maxsplit=3)
            # Skip malformed lines and relations missing from the dictionary.
            if len(line_list) < 4 or line_list[2] not in relation2id:
                continue
            # Class balance: keep at most 2600 samples per relation.
            # (was `> 2600`, which admitted 2601 samples per relation)
            if count_dict[line_list[2]] >= 2600:
                continue
            # Locate both entities in the sentence; skip the line if either
            # is absent (str.index would raise ValueError and abort the load).
            text = line_list[-1]
            index_x = text.find(line_list[0])
            index_y = text.find(line_list[1])
            if index_x == -1 or index_y == -1:
                continue
            # Split the sentence into characters and record each character's
            # signed distance to each entity's start position.
            sequence = []
            positionX = []
            positionY = []
            for index, char in enumerate(text):
                sequence.append(char)
                positionX.append(index - index_x)
                positionY.append(index - index_y)
            datas.append(sequence)
            positionE1.append(positionX)
            positionE2.append(positionY)
            labels.append(relation2id[line_list[2]])
            entities.append([line_list[0], line_list[1]])
            count_dict[line_list[2]] += 1

    return datas, labels, positionE1, positionE2, entities


def get_word_id(file_path):
    """Build word<->id vocabulary mappings from the corpus at *file_path*.

    The vocabulary is sorted before ids are assigned so that the mapping is
    deterministic across interpreter runs — a plain set iterates in hash
    order, which varies per run and would make a saved model's vocabulary
    irreproducible.

    Returns:
        word2id: dict mapping each character to a unique int id,
                 plus '<BLANK>' (padding) and '<UNK>' (out-of-vocabulary).
        id2word: the inverse mapping.
    """
    datas, labels, positionE1, positionE2, entities = get_data(file_path)
    vocab = sorted(set(chain.from_iterable(datas)))
    word2id = {word: index for index, word in enumerate(vocab)}
    # '<BLANK>' pads short sequences (same role as PAD); '<UNK>' marks
    # unseen characters at inference time.
    word2id['<BLANK>'] = len(word2id)
    word2id['<UNK>'] = len(word2id)
    # Derive the inverse map from word2id so the two can never disagree.
    id2word = {index: word for word, index in word2id.items()}
    return word2id, id2word


def sent2id_padding(sequence, word2id, max_len=None):
    """Map a character sequence to ids, truncated or padded to *max_len*.

    Args:
        sequence: iterable of characters/tokens.
        word2id: vocabulary mapping; must contain '<UNK>' and '<BLANK>'.
        max_len: target length; defaults to config.max_len when omitted,
                 so existing callers are unaffected.

    Returns:
        A list of exactly max_len ids ('<BLANK>' id used for padding).
    """
    if max_len is None:
        max_len = config.max_len
    unk = word2id['<UNK>']
    ids = [word2id.get(word, unk) for word in sequence]
    # Truncate over-long sequences.
    if len(ids) > max_len:
        return ids[:max_len]
    # Pad short sequences.
    ids.extend([word2id['<BLANK>']] * (max_len - len(ids)))
    return ids


def pos(num):
    """Shift a signed entity offset into the non-negative embedding range.

    Offsets in [-70, 70] map linearly to [0, 140]; anything below -70
    clamps to 0 and anything above 70 clamps to 142 (note: id 141 is
    never produced by this mapping).
    """
    if num < -70:
        return 0
    if num > 70:
        return 142
    return num + 70


def postion_padding(pos_ids, max_len=None):
    """Convert raw signed offsets to position ids, sized to *max_len*.

    Args:
        pos_ids: iterable of signed character-to-entity offsets.
        max_len: target length; defaults to config.max_len when omitted,
                 so existing callers are unaffected.

    Returns:
        A list of exactly max_len position ids. Padding uses 142, the
        same id `pos` assigns to offsets greater than 70.
    """
    if max_len is None:
        max_len = config.max_len
    converted = [pos(offset) for offset in pos_ids]
    # Truncate over-long sequences.
    if len(converted) >= max_len:
        return converted[:max_len]
    # Pad short sequences.
    converted.extend([142] * (max_len - len(converted)))
    return converted


if __name__ == '__main__':
    # Smoke test: build the vocabulary from the training corpus.
    # get_data(config.train_file_path)
    get_word_id(config.train_file_path)
