import re

import jieba.posseg as pseg
from matplotlib import pyplot as plt
from wordcloud import WordCloud, STOPWORDS

from sam import import_txt_path
from sam.app.base import cut_stats_export
from sam.util.fileUtil2 import read_file, write_file_quick, read_file_quick
from sam.util.strUtil import filter_symbol_4_word

# Part-of-speech tags kept for analysis: nouns (n/nr/ns/nt/nz), adjectives
# (ag/a/ad/an) and verbs (v/vn).  NOTE(review): the name looks like a typo
# for "valid_list"; kept as-is because other modules may import it.
valid_ist = ['n', 'nr', 'ns', 'nt', 'nz', 'ag', 'a', 'ad', 'an', 'v', 'vn']

# Strips the hashtag markers of the crawled weibo topics themselves.
filter_topic_re = re.compile(
    "(#你憧憬什么样的大学生活#)|(#大学后悔没有做的事#)|(#大学时期做过后悔的事#)|(#大学期间最后悔的事#)|(#理想中的大学生活是什么样子#)")

# Strips a leading "username:" prefix (full-width or ASCII colon).
# NOTE(review): ".+" is greedy, so everything up to the LAST colon on the
# line is removed — presumably intentional; confirm against the input data.
filter_username_re = re.compile("(.+：)|(.+:)")

# Combined filter: topic hashtags plus username prefixes.  Built from the
# two patterns above; the resulting pattern string is identical to writing
# it out by hand.
filter_re = re.compile(filter_topic_re.pattern + "|" + filter_username_re.pattern)


def analysis(current_search_topic):
    """Count word frequencies (with POS tags) over the imported text file
    and export the result table.

    Each input line is cleaned (symbols, topic hashtags, username prefixes
    removed), segmented with jieba's default POS mode, and every word whose
    POS tag is in ``valid_ist`` is counted.  Rows of
    ``[word, pos_tag, count]`` are written via ``write_file_quick`` to
    ``f"{current_search_topic}_分析"``.

    :param current_search_topic: topic name used to build the export file name.
    """
    # FIX: the original used ``for ... else`` with no ``break`` in the loop,
    # so the ``else`` branches always ran — a misleading no-op construct.
    word_count = {}
    word_type = {}  # word -> POS tag of its (last seen) occurrence
    lines = read_file(import_txt_path)
    # jieba default (precise) segmentation mode with POS tagging.
    for line in lines:
        if not line:
            continue
        new_line = filter_symbol_4_word(line)
        new_line = filter_re.sub("", new_line)
        for word, flag in pseg.cut(new_line):
            if flag in valid_ist:
                word_count[word] = word_count.get(word, 0) + 1
                word_type[word] = flag
    # Every counted word has a tag recorded alongside it, so direct
    # indexing into word_type is safe here.
    all_list = [[word, word_type[word], count] for word, count in word_count.items()]
    write_file_quick(data_list=all_list, export_file_name=f"{current_search_topic}_分析",
                     optional="cover")


def word_cloud(current_search_topic):
    """Build a word cloud image from the imported text file.

    Lines are cleaned (symbols, topic hashtags, username prefixes removed),
    segmented with jieba's default POS mode, and only words whose POS tag is
    in ``valid_ist`` are kept.  The cloud is saved to
    ``f"{current_search_topic}.png"`` and also shown via matplotlib.

    :param current_search_topic: topic name used to build the image file name.
    """
    segmented_lines = []
    # jieba default (precise) segmentation mode with POS tagging.
    for raw_line in read_file(import_txt_path):
        if not raw_line:
            continue
        cleaned = filter_symbol_4_word(raw_line)
        cleaned = filter_re.sub("", cleaned)
        kept_words = [word for word, pos in pseg.cut(cleaned) if pos in valid_ist]
        segmented_lines.append(" ".join(kept_words))

    corpus = " ".join(segmented_lines)
    cloud = WordCloud(
        background_color='white',  # canvas background colour
        width=500,                 # canvas width in pixels
        height=350,                # canvas height in pixels
        max_words=4000,            # upper bound on rendered words
        stopwords=STOPWORDS,       # built-in stopword set
        font_path="simkai.ttf",    # CJK-capable font; required to render Chinese
        max_font_size=50,          # cap on the largest font size
        random_state=10,           # fixed seed -> reproducible layout/colours
        mode='RGBA'
    )
    cloud.generate(corpus)
    cloud.to_file(f"{current_search_topic}.png")
    plt.imshow(cloud)
    plt.axis('off')
    plt.show()


def analysis_quick():
    """Run the quick pipeline: read the CSV input via ``read_file_quick``
    and export segmentation + word-frequency statistics for it."""
    rows = read_file_quick(file_type='csv')
    cut_stats_export(line_list=rows, export_file_name="疫情居家-分词与词频统计")


# Script entry point: run the quick CSV analysis pipeline when executed directly.
if __name__ == "__main__":
    analysis_quick()
