# coding=utf-8
# Analyze and verify the LaserTagger algorithm that converts a (source, target)
# sentence pair into a tag sequence.
import tagging, utils


def get_token_list(text):
    """Split *text* into tokens on whitespace.

    The inputs to this script are pre-segmented sentences whose tokens are
    separated by spaces, so plain whitespace splitting is the tokenizer.
    """
    return text.split()


# Load the label map and derive the vocabulary of phrases that ADD tags may
# insert.
# NOTE(review): the label-map path is hard-coded to one machine — make it a
# CLI argument if this script is reused elsewhere.
label_map = utils.read_label_map('/ssd/share/processed-data/label_map_content_synonymous_1w.txt')
_phrase_vocabulary = utils.get_phrase_vocabulary_from_label_map(label_map)

# Compute _max_added_phrase_length (the token length of the longest phrase in
# the add-phrase vocabulary) and collect every token occurring in any phrase
# into _token_vocabulary. Both are read by _compute_single_tag below.
_max_added_phrase_length = 0
_token_vocabulary = set()
print("vocabulary_size:", len(_phrase_vocabulary))
for phrase in _phrase_vocabulary:
    # Debug probe: print phrases of character length 39 — presumably the known
    # maximum length in this particular vocabulary; TODO confirm before
    # relying on the magic number.
    if len(phrase) == 39:
        print("max phrase:", phrase)
    tokens = get_token_list(phrase)
    _token_vocabulary |= set(tokens)
    if len(tokens) > _max_added_phrase_length:
        _max_added_phrase_length = len(tokens)
print("_max_added_phrase_length:", _max_added_phrase_length)


def _compute_single_tag(source_token, target_token_idx, target_tokens):
    """Compute a tag for a single source token.

    Tries to align ``source_token`` with the target sentence starting at
    ``target_token_idx``. Returns a ``(tagging.Tag, next_target_token_idx)``
    pair:
      * ``('KEEP', idx + 1)`` when source and target tokens match directly;
      * ``('KEEP|<phrase>', idx')`` when inserting a vocabulary phrase before
        the source token makes it align with a later target token;
      * ``('DELETE', idx)`` (target index unchanged) when no alignment exists.
    """
    source_token = source_token.lower()
    target_token = target_tokens[target_token_idx].lower()
    if source_token == target_token:
        return tagging.Tag('KEEP'), target_token_idx + 1

    # Greedily grow a candidate added phrase from consecutive target tokens,
    # up to the longest phrase seen in the vocabulary.
    added_phrase = ''
    for num_added_tokens in range(1, _max_added_phrase_length + 1):
        # A token that never occurs in any vocabulary phrase cannot extend a
        # valid phrase, so stop early.
        if target_token not in _token_vocabulary:
            break
        # The original (English) LaserTagger joins tokens with spaces; the
        # space-joined variant below is disabled and tokens are concatenated
        # directly — presumably because the phrases here are Chinese character
        # sequences with no separators. TODO confirm against the label map.
        # added_phrase += (' ' if added_phrase else '') + target_token
        added_phrase += target_token
        next_target_token_idx = target_token_idx + num_added_tokens
        if next_target_token_idx >= len(target_tokens):
            break
        target_token = target_tokens[next_target_token_idx].lower()
        if (source_token == target_token and
                added_phrase in _phrase_vocabulary):
            return tagging.Tag('KEEP|' + added_phrase), next_target_token_idx + 1
    # No alignment: drop this source token. (If the target is never fully
    # consumed, compute_tags filters the whole sentence pair.)
    return tagging.Tag('DELETE'), target_token_idx


def _find_first_deletion_idx(source_token_idx, tags):
    """Return the start index of the run of DELETE tags ending just before
    ``source_token_idx`` (0 when the run extends to the beginning)."""
    idx = source_token_idx
    # Walk left while the preceding tag is a DELETE.
    while idx > 0 and tags[idx - 1].tag_type == tagging.TagType.DELETE:
        idx -= 1
    return idx


def compute_tags(sources, targets):
    """Compute a tag per source token that rewrites ``sources`` into ``targets``.

    Returns the list of tags when the whole target sentence can be produced
    from the source via KEEP/DELETE plus vocabulary phrase insertions, and an
    empty list when it cannot (the pair should then be filtered out).
    """
    source_tokens = get_token_list(sources)
    target_tokens = get_token_list(targets)
    print("source_tokens length:", len(source_tokens))
    print("target_tokens length:", len(target_tokens))
    # Start from all-DELETE; tags are upgraded as alignment proceeds.
    tags = [tagging.Tag('DELETE') for _ in source_tokens]
    num_targets = len(target_tokens)
    src_idx = 0
    tgt_idx = 0
    while tgt_idx < num_targets:
        tag, tgt_idx = _compute_single_tag(
            source_tokens[src_idx], tgt_idx, target_tokens)
        tags[src_idx] = tag
        if tag.added_phrase:
            # Move the inserted phrase to the first tag of the preceding
            # DELETE run so the phrase is added as early as possible.
            dest_idx = _find_first_deletion_idx(src_idx, tags)
            if dest_idx != src_idx:
                tags[dest_idx].added_phrase = tag.added_phrase
                tag.added_phrase = ''
        src_idx += 1
        if src_idx >= len(tags):
            break
    # Only a fully consumed target yields a usable tag sequence.
    return tags if tgt_idx >= num_targets else []


if __name__ == '__main__':
    # Pre-segmented (whitespace-tokenized) Chinese example pair; the raw,
    # unsegmented originals are kept for reference.
    # sources = "因此，六本木地区也被美国国务院、英国驻日大使馆、等列为游客危险地区。"
    # targets = "因此，美国国务院把六本木地区、英国驻日大使馆等列为游客的危险区。"
    sources = "因此 ，六本木地区 也 被 美国国务院、英国驻日大使馆 、 等 列为 游客 危险 地区 。"
    targets = "因此 ， 美国国务院 把 六本木地区 、 英国驻日大使馆 等 列为 游客 的 危险 区 。"
    tags = compute_tags(sources, targets)
    if tags:
        print(' '.join('%s %s' % (tag.tag_type.name, tag.added_phrase)
                       for tag in tags))
    else:
        # Fixes: message typo ("filterd"); previously the empty tag list was
        # still joined and printed as a stray blank line after this message.
        print("this sentence is filtered")
