# -*- coding:utf-8 -*-
# @Time : 2022-2-15 10:20 
# @Author : suny570
# @Site   : 
# @File : label_words.py 
# @Software: PyCharm

# ES does not have the IK tokenizer installed, so word segmentation is done locally.
import string
import os
import jieba
# Resolve the directory containing this file so the user dictionary is found
# regardless of the process's current working directory.
root_path = os.path.abspath(os.path.dirname(__file__))
# Register domain-specific terms with jieba so they segment as single tokens.
jieba.load_userdict(root_path + '/resources/userdict.txt')

class LabelWords(object):
    """Highlight keyword occurrences in text by wrapping them in <em></em> tags.

    The vocabulary is normally the highlight-term list returned by ES; terms
    that are single characters or contain any non-Chinese character (ASCII
    letter, digit, whitespace, punctuation) are filtered out. When ES supplies
    no terms, jieba segmentation of the query is used as a local fallback.
    """

    def __init__(self, words):
        # De-duplicated, filtered highlight vocabulary (list of str).
        self.vocab = self.remove_repeat_words(words)

    def remove_repeat_words(self, words):
        """Filter candidate terms down to a unique highlight vocabulary.

        Keeps only terms longer than one character that contain no ASCII
        letters, digits, whitespace or punctuation.

        :param words: iterable of candidate highlight terms
        :return: list of unique, filtered terms
        """
        # A set comprehension both de-duplicates and filters in one pass;
        # the original's list(set(tmp_set)) re-wrapped an existing set.
        return list({w for w in words if len(w) > 1 and not self.contains_en_dg(w)})

    def highlight_used_es_hits(self, query, content):
        """Wrap every vocabulary term found in *content* with <em></em>.

        Uses the terms ES already marked, avoiding tokenizer mismatch between
        ES and the local segmenter. Falls back to local jieba segmentation of
        *query* when ES supplied no terms.

        :param query: original search query (used only by the local fallback)
        :param content: text to annotate
        :return: annotated text
        """
        if not self.vocab:
            # ES supplied no highlight terms -> segment locally instead.
            return self.local_words_segment(query, content)
        ret_cont = content
        for word in self.vocab:
            ret_cont = ret_cont.replace(word, '<em>' + word + '</em>')
        return ret_cont

    def local_words_segment(self, title, content):
        """Annotate *content* using jieba segmentation of *title*.

        :param title: query text to segment into highlight keywords
        :param content: text to annotate
        :return: annotated text
        """
        # Keep only multi-character tokens as highlight keywords.
        vocab = {tok for tok in jieba.lcut(title) if len(tok) > 1}
        ret_cont = content
        for word in vocab:
            ret_cont = ret_cont.replace(word, '<em>' + word + '</em>')
        return ret_cont

    def label_words_content(self, content):
        """Re-annotate *content* from scratch against the stored vocabulary.

        Any pre-existing <em></em> markers are stripped first so tags never
        nest or duplicate.

        :param content: possibly pre-annotated text; may be None or empty
        :return: annotated text, or "" for empty/None input
        """
        if not content:
            return ""
        # Remove stale markers before re-applying fresh ones.
        ret_cont = content.replace('<em>', '').replace('</em>', '')
        for word in self.vocab:
            ret_cont = ret_cont.replace(word, '<em>' + word + '</em>')
        return ret_cont

    def contains_en_dg(self, s):
        """Return True if *s* contains any non-Chinese character.

        Non-Chinese means: ASCII letter, digit, whitespace, or any character
        that is not alphabetic (punctuation/symbols). CJK characters satisfy
        str.isalpha() and do not trigger a True result.

        BUG FIX: the original ended with a bare ``False`` expression (missing
        ``return``), so it implicitly returned None for all-Chinese input.
        It now returns a genuine bool; callers that truthiness-test the
        result are unaffected.
        """
        for c in s:
            if c in string.ascii_letters:
                return True   # ASCII letter
            if c.isdigit():
                return True   # digit
            if c.isspace():
                return True   # whitespace
            if not c.isalpha():
                return True   # punctuation / other symbol
        return False


if __name__ == '__main__':
    # Sample of ES per-character highlighting output (unused; kept for reference).
    demo_text = '<em>北</em><em>京</em><em>2008</em><em>年</em><em>奥</em><em>运</em><em>会</em>闭幕时间？'

    # Demo: segment a phrase containing a user-dictionary term, then annotate
    # the phrase via local jieba segmentation.
    phrase = '沃进万家的优惠活动'
    print(jieba.lcut(phrase))
    labeler = LabelWords([])
    print(labeler.local_words_segment('沃进万家', phrase))