import numpy as np
import requests
import flashtext
import jieba

class SensitiveWordModel(object):
    """Score text for sensitive vocabulary.

    Loads a tab-separated dictionary file where each line is
    ``word<TAB>type_flag[<TAB>score ...]``. Words with flag ``'0'`` are
    "soft" words carrying a numeric score (sum of trailing columns);
    any other flag marks a hard sensitive word matched exactly via
    flashtext.
    """

    def __init__(self, word_url):
        # NOTE(review): despite the name, word_url is a local file path,
        # not a URL (it is passed straight to open()) — confirm and rename.
        self.word_url = word_url
        self.spam = set()    # hard sensitive words -> predict() returns 1
        self.nospam = {}     # soft word -> summed numeric score
        self.word_filter = flashtext.KeywordProcessor()
        self.__get_words_info()
        self.word_filter.add_keywords_from_list(list(self.spam))

    def predict(self, text):
        """Return a spam score in [0, 1].

        Returns 1 on any hard-keyword hit; otherwise the mean of the
        top-3 soft-word scores among the segmented words, capped at 1.
        """
        text = text.lower()
        words = set(jieba.cut(text))
        if self.word_filter.extract_keywords(text):
            return 1
        # BUG FIX: sort the scores descending BEFORE slicing the top 3.
        # The original sliced `[:3]` first, which picked three arbitrary
        # elements of an unordered set instead of the three highest scores.
        top_scores = sorted((self.nospam.get(w, 0) for w in words), reverse=True)[:3]
        return min(sum(top_scores) / 3, 1)

    def prdeict_2(self):
        # NOTE(review): apparently dead code (typo of "predict_2"?); kept
        # only for backward compatibility — confirm no callers, then delete.
        return 1

    def __get_words_info(self):
        """Parse the dictionary file into self.spam / self.nospam.

        Blank and malformed (fewer than 2 tab-separated fields) lines
        are skipped silently.
        """
        with open(self.word_url, 'r', encoding='utf-8') as file:
            for line in file:
                line = line.strip()
                if not line:
                    continue
                parts = line.split('\t')
                if len(parts) < 2:
                    continue  # malformed line: no type flag
                word = parts[0]
                type_flag = parts[1]
                if type_flag == '0':
                    # Soft word: score is the sum of any trailing numeric columns.
                    if len(parts) > 2:
                        self.nospam[word] = np.asarray(parts[2:], dtype=float).sum()
                    else:
                        self.nospam[word] = 0
                else:
                    self.spam.add(word)


if __name__ == '__main__':
    # Demo: score one sample phrase against the local sensitive-word dictionary.
    word_url = 'F:\\数据集\\srtp2024\\中文敏感词词典\\ChineseSensitiveVocabulary-master\\chinese_sensitive_vocabulary\\data\\words.txt'
    text = '帅哥'
    model = SensitiveWordModel(word_url)
    print(f"{text}的骂人值：")
    print(model.predict(text))