import os
import jieba
import argparse
import numpy as np
import pandas as pd
from PIL import Image
from wordcloud import WordCloud


def load_text(df, col_num, row_num, flag=False):
    """Concatenate comment cells of *df* into one space-separated string.

    Columns 1..col_num-1 of every row are read (column 0 is skipped —
    presumably an id/label column; TODO confirm against the workbook layout).

    Args:
        df: DataFrame loaded from the comments spreadsheet.
        col_num: number of columns in df (df.shape[1]).
        row_num: number of rows in df (df.shape[0]).
        flag: when True, segment each comment with jieba and drop words
            found in stopwords.txt; when False, append comments verbatim.

    Returns:
        The joined text, each word/comment followed by a single space
        ('' when there is nothing to join).

    Raises:
        FileNotFoundError: if stopwords.txt is missing from the CWD.
    """
    # Stopword list. Use a context manager so the file handle is closed,
    # and a set so membership tests in the loop are O(1).
    with open('stopwords.txt', encoding='utf-8') as f:
        stopwords = {line.strip() for line in f}
    # Collect pieces and join once at the end instead of quadratic `text +=`.
    parts = []
    for r in range(row_num):
        for c in range(1, col_num):
            comment = str(df.iloc[r, c])
            if comment == 'nan':  # missing cell (NaN stringified)
                continue
            if flag:
                # Segment, then keep only words that are NOT stopwords.
                # (The original condition was inverted: `if word in stopwords`
                # kept only the stopwords and threw everything else away.)
                parts.extend(w for w in jieba.lcut(comment) if w not in stopwords)
            else:  # no segmentation: append the whole comment
                parts.append(comment)
    # Original emitted a trailing space after every appended piece.
    return ' '.join(parts) + ' ' if parts else ''


if __name__ == '__main__':
    # CLI entry point: render a word-cloud image from the comment cells
    # of an Excel workbook, masked by bg.jpg, saved as out.jpg.
    parse = argparse.ArgumentParser()
    parse.add_argument("--input", type=str, required=True, help="input excel file")
    parse.add_argument("--cut", action='store_true', required=False, default=False, help="cut words")

    args = parse.parse_args()
    flag = args.cut
    file = args.input  # e.g. 'comments_month_all.xlsx'

    # Validate with isfile: the original used os.path.isdir, which is always
    # False for a spreadsheet path, so the script raised even on valid input.
    if not os.path.isfile(file):
        print("指定的文件不存在")  # "the specified file does not exist"
        raise FileNotFoundError(file)

    # Load the spreadsheet and flatten its comment cells into one string.
    df = pd.read_excel(file)
    row_num, col_num = df.shape
    text = load_text(df, col_num, row_num, flag)

    # The mask image defines the cloud's shape; a CJK font_path is required
    # for Chinese glyphs to render instead of boxes.
    bg_img = np.array(Image.open('bg.jpg'))
    font = 'chinese.simhei.ttf'
    wcd = WordCloud(mask=bg_img, background_color='white', repeat=True, max_words=150, font_path=font)
    wcd.generate(text)
    res_img = wcd.to_image()
    res_img.save('out.jpg')
