# encoding: utf-8
# author: 侯佳涛
# file_name: data_utils
# create_time: 2021/2/26, 下午2:53
from collections import Counter

from bert import tokenization


def check_bio(tags):
    """
    Validate and repair BIO tag sequences in place.

    Every tag must be 'O' or '<prefix>-<type>' with prefix 'B' or 'I'.
    An 'I-*' tag that does not continue a same-type entity (sentence
    start, right after 'O', or after a different entity type) is
    rewritten to the corresponding 'B-*' tag.

    :param tags: list of per-sentence tag lists; modified in place
    :return: None
    :raises Exception: when a tag is neither 'O' nor a valid B-/I- tag
    """
    for i, tag_list in enumerate(tags):
        for j, tag in enumerate(tag_list):
            if tag == 'O':
                continue

            tag_split = tag.split('-')
            if len(tag_split) != 2 or tag_split[0] not in ['B', 'I']:
                raise Exception('第{}条数据的第{}个字的编码是有非法编码'.format(i, j))
            elif tag_split[0] == 'B':
                continue
            elif j == 0 or tag_list[j - 1] == 'O':
                # 'I-*' with no preceding entity: promote to 'B-*'.
                # Write back into the list (assigning to the local `tag`
                # would silently discard the repair).
                tag_list[j] = 'B' + tag[1:]
            elif tag_list[j - 1][1:] == tag[1:]:
                # Previous tag is 'B-*' or 'I-*' of the SAME type:
                # this is a valid continuation.
                continue
            else:
                # Entity type changed without a new 'B': promote to 'B-*'.
                # ('B' + tag[1:] keeps the '-' already present in tag.)
                tag_list[j] = 'B' + tag[1:]

    return None


def create_mapping(sentences):
    """
    Build a word-frequency mapping from a corpus.

    :param sentences: iterable of sentences, each an iterable of words
    :return: tuple of (word -> count dict ordered by descending count,
        and its inversion count -> word).
        NOTE(review): the second dict keys on counts, so words sharing a
        frequency collapse to a single (last-wins) entry — confirm that
        callers really expect a count-to-word map rather than an id map.
    """
    freq = Counter(word for sentence in sentences for word in sentence)

    # Stable sort: descending by count, insertion order preserved on ties.
    ordered = dict(sorted(freq.items(), key=lambda item: item[1], reverse=True))
    inverted = {count: word for word, count in ordered.items()}

    return ordered, inverted


def convert_sentence_to_sample(sentence, tag2id, params):
    """
    Convert one annotated sentence into BERT model inputs.

    :param sentence: sequence of (word, ..., tag) items; element [0] is
        the character/word, element [-1] is its label string
    :param tag2id: dict mapping label strings to integer label ids
    :param params: config object providing bert_vocab_file,
        do_lower_case and max_seq_len
    :return: [string, token_ids, mask, segment_ids, label_ids]
    """
    string = []
    labels = []
    for w_t in sentence:
        string.append(w_t[0])
        labels.append(w_t[-1])

    char_line = ' '.join(string)
    char_line = tokenization.convert_to_unicode(char_line)
    # NOTE(review): a FullTokenizer is rebuilt from the vocab file on
    # every call — hoisting it to the caller would avoid re-reading the
    # vocab for each sentence.
    tokenizer = tokenization.FullTokenizer(
        vocab_file=params.bert_vocab_file,
        do_lower_case=params.do_lower_case
    )

    tokens = tokenizer.tokenize(char_line)

    # Reserve two slots for the [CLS]/[SEP] markers added below.
    if len(tokens) >= params.max_seq_len - 1:
        tokens = tokens[: params.max_seq_len - 2]

    tokens = ['[CLS]'] + tokens + ['[SEP]']
    token_ids = tokenizer.convert_tokens_to_ids(tokens)

    # Truncate long sequences, pad short ones to max_seq_len.
    # NOTE(review): label_ids is built from the ORIGINAL labels while the
    # padding length is computed from len(tokens), which includes
    # [CLS]/[SEP] and any WordPiece splits; when the two lengths differ,
    # label_ids may misalign with token_ids and may not end up exactly
    # max_seq_len long — confirm against the training code.
    if len(token_ids) < params.max_seq_len:
        token_ids = token_ids + [0] * (params.max_seq_len - len(tokens))
        mask = [1] * len(tokens) + [0] * (params.max_seq_len - len(tokens))
        label_ids = [tag2id[label] for label in labels] + [0] * (params.max_seq_len - len(tokens))
    else:
        token_ids = token_ids[: params.max_seq_len]
        mask = [1] * params.max_seq_len
        label_ids = [tag2id[label] for label in labels][: params.max_seq_len]

    # Single-sentence input: all tokens belong to segment 0.
    segment_ids = [0] * params.max_seq_len

    return [string, token_ids, mask, segment_ids, label_ids]

