import os
import wordcloud
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.font_manager as fm
import webbrowser
import pandas as pd
import jieba
from collections import Counter

def word_frequency(filename, stop_words_filename, top_n=10):
    """Return the top_n most frequent words in the 'Comment' column of a CSV.

    Args:
        filename: Path to a CSV file containing a 'Comment' column.
        stop_words_filename: Path to a UTF-8 text file, one stop word per line.
        top_n: Number of most-common words to report (default 10).

    Returns:
        A list of "word: count" strings, most frequent first.
    """
    def _keep(word):
        # Drop single characters and tokens that start with common Chinese
        # punctuation or filler particles.
        if len(word) == 1 or word[0] in ['，', '。', '！', '？', '了', '啊', '啦', '呀', '；']:
            return False
        return True

    # Use a set so the per-token membership test below is O(1), not O(n).
    with open(stop_words_filename, 'r', encoding='utf-8') as f:
        stop_words = set(f.read().split('\n'))

    # BUG FIX: the original passed encoding='ANSI', which is not a Python
    # codec name and raises LookupError. Try UTF-8 first and fall back to
    # GBK, matching generate_word_cloud below.
    try:
        df = pd.read_csv(filename, encoding='utf-8')
    except UnicodeDecodeError:
        df = pd.read_csv(filename, encoding='gbk')

    # Segment every comment with jieba and keep only meaningful tokens.
    word_list = []
    for contents in df['Comment']:
        word_list.extend(
            word for word in jieba.cut(contents)
            if _keep(word) and word not in stop_words
        )

    word_freq = Counter(word_list)
    return [f"{word}: {freq}" for word, freq in word_freq.most_common(top_n)]

def generate_word_cloud(filename, stopwords_filename, item_id, result_dir="wordcloud-result", top_n=100,
                        font_path="./typeface/SourceHanSans-Regular.otf",
                        height=800, width=1200, scale=2, mode="RGBA", background_color='#F8F8F8', colormap="Set2",
                        max_font_size=300, random_state=42):
    """Build a word cloud and a frequency bar chart for one product's comments.

    Reads the CSV at `filename`, keeps rows whose 'itemId' equals `item_id`,
    segments the 'Comment' text with jieba, filters stop words, then saves
    "<item_id>-1.png" (word cloud) and "<item_id>-2.png" (bar chart) under
    `result_dir`.

    Args:
        filename: Path to the comments CSV (columns include 'itemId', 'Comment').
        stopwords_filename: UTF-8 text file, one stop word per line.
        item_id: Product id used to filter rows.
        result_dir: Output directory, created if missing.
        top_n: Maximum number of words rendered in the cloud.
        font_path: CJK-capable font used for both images.
        height/width/scale/mode/background_color/colormap/max_font_size/random_state:
            Passed through to wordcloud.WordCloud.

    Returns:
        (result, file_name, freq_file_name): `result` is a list of
        "word: count" strings for the 10 most frequent words (empty on
        failure), the other two are image base names relative to `result_dir`.
    """
    def _keep(word):
        # Drop single characters and tokens that start with common Chinese
        # punctuation or filler particles.
        if len(word) == 1 or word[0] in ['，', '。', '！', '？', '了', '啊', '啦', '呀', '；']:
            return False
        return True

    # Load stop words into a set for O(1) membership tests.
    with open(stopwords_filename, 'r', encoding='utf-8') as f:
        stop_words = set(f.read().split('\n'))

    # Read comments from CSV; fall back to GBK when the file is not UTF-8.
    try:
        df = pd.read_csv(filename, encoding='utf-8')
    except UnicodeDecodeError:
        df = pd.read_csv(filename, encoding='gbk')
    df = df[df['itemId'] == item_id]
    df.dropna(inplace=True)

    # Word segmentation and filtering (stop words removed here, once).
    word_list = []
    for contents in df['Comment']:
        word_list.extend(
            word for word in jieba.cut(contents)
            if _keep(word) and word not in stop_words
        )

    word_freq = Counter(word_list)

    # BUG FIX: with no surviving words, generate_from_frequencies raises an
    # opaque ValueError. Return the empty-result shape update_gui checks for.
    if not word_freq:
        return [], "", ""

    # Generate the word cloud from the top_n frequencies.
    word_dic = {word: float(freq) for word, freq in word_freq.most_common(top_n)}
    word_cloud = wordcloud.WordCloud(font_path=font_path, height=height, width=width, scale=scale,
                                     mode=mode, background_color=background_color, colormap=colormap,
                                     max_words=top_n, max_font_size=max_font_size,
                                     random_state=random_state).generate_from_frequencies(word_dic)

    # Save the word cloud image.
    os.makedirs(result_dir, exist_ok=True)
    file_name = str(item_id) + "-1.png"
    word_cloud.to_file(os.path.join(result_dir, file_name))

    # Top-10 word frequencies returned to the caller.
    top_n_freq = word_freq.most_common(10)
    result = [f"{word}: {freq}" for word, freq in top_n_freq]

    # Frequency bar chart, using the same CJK-capable font as the cloud.
    sns.set_style("whitegrid")
    sns.set_palette("pastel")
    prop = fm.FontProperties(fname=font_path)

    fig = plt.figure(figsize=(12, 8))
    sns.barplot(x=[word for word, freq in top_n_freq], y=[freq for word, freq in top_n_freq])
    plt.xticks(rotation=45, ha="right", fontsize=12, fontproperties=prop)
    plt.xlabel("Words", fontsize=14, fontproperties=prop)
    plt.ylabel("Frequency", fontsize=14, fontproperties=prop)
    plt.title("Word Frequency", fontsize=18, fontproperties=prop)

    freq_file_name = str(item_id) + "-2.png"
    plt.savefig(os.path.join(result_dir, freq_file_name))
    # BUG FIX: the original never closed its figures (and drew an extra one
    # with plt.imshow that was never shown), leaking memory on every call —
    # this function runs repeatedly from a worker thread.
    plt.close(fig)

    return result, file_name, freq_file_name


def generate_word_cloud_threaded(file_path, stopwords_path, product_id, result_dir, top_n, font_path, height, width,
                                 scale, mode, background_color, colormap, max_font_size, random_state, callback):
    """Worker-thread wrapper around generate_word_cloud.

    Runs the (slow) word-cloud and frequency-chart generation off the GUI
    thread, then hands the results to `callback` so the main thread can
    update the interface.

    Args:
        callback: Callable invoked as callback(result, word_cloud_path, freq_path).
        (All other parameters are forwarded unchanged to generate_word_cloud.)
    """
    # FIX: the original declared `global word_cloud_path, freq_path`.
    # Mutating module globals from a worker thread is race-prone, and the
    # values are already delivered through the callback, so plain locals
    # are used instead.
    result, word_cloud_path, freq_path = generate_word_cloud(file_path, stopwords_path, product_id, result_dir, top_n,
                                                             font_path, height, width, scale, mode, background_color,
                                                             colormap, max_font_size, random_state)

    # Pass the results back to the main thread via the callback.
    callback(result, word_cloud_path, freq_path)


def update_gui(result, word_cloud_path, freq_path):
    """Refresh the user interface with word-cloud results.

    An empty `result` means generation failed, so an error popup is shown;
    otherwise the word list, the cloud image and the frequency chart are
    displayed and the matching frame is made visible.
    """
    if result != []:
        # Populate the word-frequency list box.
        window['-list2-'].update(values=result)
        # Show only the results frame.
        window['-FrameT1-'].update(visible=False)
        window['-FrameT2-'].update(visible=True)
        window['-FrameT3-'].update(visible=False)
        # Load both generated images.
        window['-Image1-'].update(filename=word_cloud_path)
        window['-Image2-'].update(filename=freq_path)
    else:
        sg.Popup('获取词云失败！', font='黑体', text_color='red')


# Test
# result, word_cloud_path, freq_path = generate_word_cloud("comment.csv", "stopwords.txt", 1,
#                                                          result_dir="wordcloud-result", top_n=100,
#                                                          font_path="./typeface/SourceHanSans-Regular.otf",
#                                                          height=1600, width=2400, scale=3, mode="RGBA",
#                                                          background_color='#252525', colormap="Pastel1",
#                                                          max_font_size=800, random_state=42)
# print(type(result))
# print(result)
# print("Word Cloud saved at:", os.path.join("wordcloud-result", word_cloud_path))
# print("Word Frequency image saved at:", os.path.join("wordcloud-result", freq_path))
# Open word cloud image in default web browser
# webbrowser.open(os.path.join("wordcloud-result", word_cloud_path))

# Open word frequency image in default web browser
# webbrowser.open(os.path.join("wordcloud-result", freq_path))
