import pandas as pd
import jieba
from collections import Counter


def word_frequency(filename, stop_words_filename, top_n=10, encoding='ANSI'):
    """Return the top_n most frequent words in the 'Comment' column of a CSV.

    Each comment is segmented with jieba; single-character tokens, tokens
    whose first character is a common Chinese punctuation mark or filler
    particle, and stop words are discarded before counting.

    Args:
        filename: Path to the CSV file; must contain a 'Comment' column.
        stop_words_filename: Path to a UTF-8 stop-word file, one word per line.
        top_n: Number of most-common entries to return (default 10).
        encoding: Encoding used to read the CSV. Defaults to 'ANSI' for
            backward compatibility (a Windows-only codec alias); pass an
            explicit codec such as 'utf-8' or 'gbk' on other platforms.

    Returns:
        A list of "word: count" strings, most frequent first.
    """
    # First characters that disqualify a token (punctuation / particles).
    skip_first_chars = frozenset('，。！？了啊啦呀；')

    def _keep(token):
        # len > 1 also rejects the empty string, which would have raised
        # IndexError on token[0] in the original first-char check.
        return len(token) > 1 and token[0] not in skip_first_chars

    # Load stop words into a set: O(1) membership per token instead of an
    # O(n) list scan. strip() drops stray whitespace and the empty entry
    # produced by a trailing newline.
    with open(stop_words_filename, 'r', encoding='utf-8') as f:
        stop_words = {line.strip() for line in f}

    df = pd.read_csv(filename, encoding=encoding)

    # Segment every comment and count the tokens that survive filtering.
    word_freq = Counter(
        token
        for comment in df['Comment']
        for token in jieba.cut(comment)
        if _keep(token) and token not in stop_words
    )

    return [f"{word}: {freq}" for word, freq in word_freq.most_common(top_n)]
