import os
import collections
import re
import jieba
import nltk
from nltk.corpus import stopwords
from wordcloud import WordCloud


class HotWordAnalyse:
    """Analyse Chinese review text: clean it, tokenize it, count word
    frequencies, and optionally render a word-cloud image."""

    def __init__(self):
        # Fetch the NLTK corpora required by `stopwords` (no-op if cached).
        nltk.download('stopwords')
        nltk.download('punkt')
        # Output directory for generated word-cloud images.
        os.makedirs("./result", exist_ok=True)

    def data_clean(self, content: str) -> str:
        """Clean raw text before tokenization.

        Removes bracketed annotations and punctuation, strips degree/filler
        words (e.g. '景色非常优美' -> '景色优美', so the core descriptive term
        gets a higher frequency), and normalizes known misspellings.

        :param content: raw review text
        :return: cleaned text
        """
        # Drop 【...】 annotation markers (non-greedy: only the bracketed span).
        content = re.sub(r'【.*?】', '', content)
        # Strip everything that is not a word character or whitespace.
        content = re.sub(r'[^\w\s]', '', content)
        # Degree/filler words to remove; order matters ('还是' before '还').
        surplus_words = ['很', '感觉', '非常', '还是', '也', '还', '都', '挺',
                         '真', '的确', '超级', '确实', '一点']
        for filler in surplus_words:
            # str.replace, not re.sub: these are literals, not patterns.
            content = content.replace(filler, '')

        # Normalize commonly mistaken spellings to the canonical form.
        mistaken_words = {
            "纳木错": "纳木措",
            "汗血马": "汗血宝马",
            "海梗": "海埂",
        }
        for wrong, right in mistaken_words.items():
            content = content.replace(wrong, right)

        return content

    def data_split(self, content: str) -> list:
        """Tokenize cleaned text and filter out noise tokens.

        :param content: cleaned text (output of :meth:`data_clean`)
        :return: list of meaningful tokens
        """
        # Stop words: NLTK's Chinese list plus a project-specific custom list.
        # A set gives O(1) membership tests in the filter loop below.
        stop_words = set(stopwords.words('chinese'))
        with open("./resources/custom_stopwords.txt", mode="r", encoding="utf-8") as f:
            # splitlines() avoids the trailing empty entry split("\n") leaves
            # and handles CRLF files; strip() guards against stray spaces.
            stop_words.update(line.strip() for line in f.read().splitlines())
        # Precise-mode segmentation.
        words = jieba.lcut(content, cut_all=False)
        keep_single_word = {'美'}  # single characters that still carry meaning
        filtered_words = []
        for word in words:
            # Drop whitespace/single characters unless explicitly whitelisted.
            if len(word.strip()) <= 1 and word not in keep_single_word:
                continue
            if word in stop_words:
                continue
            if word.isdigit():
                continue
            filtered_words.append(word)

        return filtered_words

    def draw_cloud(self, filtered_words, cityName):
        """Render a word-cloud PNG from a word->frequency mapping.

        :param filtered_words: mapping of word to frequency (e.g. the Counter
            returned by :meth:`analyse`), as required by
            ``generate_from_frequencies``
        :param cityName: city name used in the output file name
        """
        wc = WordCloud(font_path='./resources/simhei.ttf',  # SimHei font: needed to render CJK glyphs
                       background_color='white',
                       width=1000,
                       height=600,
                       )
        wc.generate_from_frequencies(filtered_words)
        wc.to_file(f'./result/{cityName}词云图.png')  # export PNG (relative path)

    def analyse(self, content):
        """Full pipeline: clean -> tokenize/filter -> count frequencies.

        :param content: raw review text
        :return: collections.Counter mapping word -> frequency
        """
        content = self.data_clean(content)
        filtered_words = self.data_split(content)
        return collections.Counter(filtered_words)





