import jieba
import re

vocab = {}  # word -> frequency; dict keys are unique, so duplicates collapse automatically
list_temp = []  # flat list of every token produced by cutWord(), consumed by index()


# 通过jieba库来进行分词
# Tokenize source.txt with jieba and accumulate the tokens.
def cutWord():
    """Read source.txt line by line, segment each line with jieba,
    and append every token to the module-level list_temp.
    """
    # encoding pinned so the read does not depend on the platform default
    with open('source.txt', 'r', encoding='utf-8') as file:
        for line in file:
            # strip the trailing newline before segmenting;
            # extend() replaces the original token-by-token append loop
            list_temp.extend(jieba.cut(line.strip()))


# 在分词过程中进行词频统计，并将分好的内容写入字典
# Tally word frequencies into the shared vocab dict.
def index(word):
    """Count each token in *word* into the module-level vocab dict.

    word: an iterable of tokens (typically list_temp after cutWord()).
    Existing counts are incremented; unseen tokens start at 1.
    """
    for token in word:
        if token in vocab:
            vocab[token] += 1
        else:
            vocab[token] = 1


# 将字典中的数据写入Wsource文件中
# Write the word-frequency dictionary to Wsource.txt, one "word count" per line.
def writeWord():
    """Dump the module-level vocab dict to Wsource.txt.

    Each output line has the form '<word> <count>'.
    """
    # filename fixed: the original wrote 'Wsoure.txt', a typo for the
    # Wsource target named in the surrounding comments
    # encoding pinned so Chinese output does not depend on the platform default
    with open('Wsource.txt', 'w', encoding='utf-8') as out:
        # iterate items() instead of re-looking up each key
        for word, count in vocab.items():
            out.write(word + ' ' + str(count) + '\n')


# Word-count pipeline: tokenize source.txt, tally frequencies, write the result.
cutWord()
index(list_temp)
writeWord()



# Blacklist of sensitive words to mask with '***'.
# NOTE(review): '他' ("he/him") is an extremely common character and will mask
# ordinary text — confirm it really belongs in this list.
dirty = ['fuck', '狗日的', '犊子', '麻批', '仙人板板', 'R你妈', '操你', '草你', '他']
# Sample sentence; currently unused — the filter below runs on source.txt instead.
speak = '你个狗日的，fuckR你妈呦，操你个仙人板板，个老麻批'


# 读取source中的文本
# Read the whole of source.txt and return it as one string.
def read_sentence():
    """Return the full contents of source.txt as a single string."""
    # file.read() replaces the original line-by-line '+=' concatenation,
    # which builds the string quadratically; encoding pinned for portability
    with open("source.txt", 'r', encoding='utf-8') as file:
        return file.read()


# 通过正则表达式处理敏感词
# Mask every occurrence of any keyword in *text* with '***'.
def sentence_filter(keywords, text):
    """Replace each occurrence of any string in *keywords* with '***'.

    keywords: iterable of literal strings to censor; regex metacharacters
              are escaped, so each keyword matches literally.
    text:     the input string.
    Returns the censored string; returns *text* unchanged when keywords
    is empty.
    """
    # Guard: "|".join([]) is the empty pattern, which matches at every
    # position and would insert '***' between every character.
    if not keywords:
        return text
    # re.escape keeps keywords like 'a.b' or 'c++' from being read as regex.
    return re.sub("|".join(map(re.escape, keywords)), "***", text)


# 将处理完成的语句写入新的文本中
# Write the censored version of *text* to NewSource.txt.
def write_sentence(keywords, text):
    """Run *text* through sentence_filter and write the result to NewSource.txt.

    keywords: sensitive words to mask.
    text:     the raw input string.
    """
    # encoding pinned so Chinese output does not depend on the platform default
    with open("NewSource.txt", 'w', encoding='utf-8') as wf:
        wf.write(sentence_filter(keywords, text))


# Censor pipeline: read source.txt once, write the masked version to
# NewSource.txt, and echo it to stdout.
text = read_sentence()  # read once instead of twice (the original re-read the file)
write_sentence(dirty, text)
print(sentence_filter(dirty, text))
