import jieba
import numpy as np
import warnings
from collections import Counter
# Globally silence warnings (e.g. numpy/jieba deprecation chatter) so batch
# cleaning runs produce clean console output. NOTE(review): this hides ALL
# warnings process-wide — confirm this is intended before reuse elsewhere.
warnings.filterwarnings('ignore')

class dataClean:
    '''
    Chinese-text cleaning pipeline.

    Each cleaning step is implemented as its own method so individual steps
    are easy to modify later. Two modes are supported:

    Parameters
    ----------
    data :
        The data to process. In training mode (``highFrequencyFlag=True``)
        this is expected to be an iterable of tokenized documents (each a
        list of words, typically the "content" column). In cleaning mode it
        must be a mapping/DataFrame with a "title" column of raw sentences.
    highFrequencyFlag : bool
        False (default) runs the cleaning pipeline; True runs high-frequency
        word extraction (training) instead.
    stopWordsPath : str
        Path to the stopword list, one token per line. Defaults to the
        original hard-coded location so existing callers are unaffected.
    '''

    def __init__(self, data, highFrequencyFlag=False,
                 stopWordsPath="D:\JupyterProject\编程作业2\中文停用词表.txt"):
        self.data = data
        self.highFrequencyFlag = highFrequencyFlag
        self.stopWordsPath = stopWordsPath

    def run(self):
        '''Dispatch to training or cleaning depending on the flag.

        Returns None in training mode; the cleaned nested word list in
        cleaning mode.
        '''
        if self.highFrequencyFlag:
            # Training mode: extract and persist the high-frequency word set.
            self.highFrequencyTrain()
            return None
        # Cleaning mode: run the full clean (split -> de-high-freq -> de-stop).
        return self.stopWordsClean()

    def wordsSplit(self):
        '''Segment each sentence into words with jieba.

        In training mode the raw data is used directly (content/body text);
        otherwise only the "title" column is segmented.
        '''
        table = self.data if self.highFrequencyFlag else self.data["title"]
        # Precise-mode segmentation with HMM enabled for out-of-vocabulary words.
        return [jieba.lcut(sentence, cut_all=False, HMM=True) for sentence in table]

    def highFrequencyTrain(self):
        '''Extract high-frequency words via word2vec-style subsampling and
        persist them to ``train_words.npy``.

        BUG FIX: the original collected ``totalWords`` in a loop that had
        fallen OUT of the per-document loop, so only the last document's
        words were gathered while ``totalCount`` counted every document.
        All documents are now flattened into the corpus.
        '''
        threshold = 0.8   # words with drop-probability below this are kept
        t = 1e-5          # subsampling constant (word2vec heuristic)
        # Flatten every document into one corpus (fixes the indentation bug).
        totalWords = [word for doc in self.data for word in doc]
        totalCount = len(totalWords)
        if totalCount == 0:
            # Empty corpus: persist an empty array instead of crashing
            # (the original raised NameError/ZeroDivisionError here).
            np.save(r'train_words.npy', np.array([]))
            print("完成高频词提取")
            return
        wordsCounter = Counter(totalWords)
        wordFreqs = {w: c / totalCount for w, c in wordsCounter.items()}
        # Probability of dropping each word: high-frequency words get values
        # close to 1, rare words get values near (or below) 0.
        probDrop = {w: 1 - np.sqrt(t / f) for w, f in wordFreqs.items()}
        # Sample the corpus: keep only words unlikely to be dropped.
        trainWords = [w for w in totalWords if probDrop[w] < threshold]
        np.save(r'train_words.npy', np.array(trainWords))
        print("完成高频词提取")

    def highFrequencyClean(self):
        '''Remove previously-trained words (from ``train_words.npy``) from
        each segmented document and return the filtered nested list.
        '''
        wordList = self.wordsSplit()
        # Load the trained word set; deduplicate for O(1) membership tests.
        trainWords = set(np.load(r"train_words.npy").tolist())
        return [[word for word in doc if word not in trainWords]
                for doc in wordList]

    def stopWordsClean(self):
        '''Drop stopwords, blanks and pure digits from each document.

        BUG FIX: the original kept the stopword file as one raw string, so
        ``word not in stopWords`` was a SUBSTRING test against the whole
        file — any word occurring inside any stopword was wrongly removed.
        The stopwords are now split into a set of whole tokens.
        '''
        # NOTE(review): assumes the stopword file is UTF-8 encoded — confirm;
        # the original relied on the platform default encoding.
        with open(self.stopWordsPath, "r", encoding="utf-8") as f:
            stopWords = set(f.read().split())
        wordsClean = self.highFrequencyClean()
        return [
            [word for word in doc
             if word not in stopWords and word != ' ' and not word.isdigit()]
            for doc in wordsClean
        ]