import os, re, jieba, collections
import matplotlib.pyplot as plt
import numpy as np
import jieba.analyse
from PIL import Image
from wordcloud import WordCloud, STOPWORDS


def keyword(text, maxwords, projectID):
    """Extract top keywords from Chinese text and write a frequency report.

    Args:
        text: Raw input text (Chinese, possibly mixed with ASCII noise).
        maxwords: Maximum number of keywords to extract (topK for TF-IDF).
        projectID: Identifier used to name the output report file.

    Returns:
        Tuple ``(wt, highwordsfile)`` where ``wt`` is a "/"-joined string of
        the extracted keywords (suitable as word-cloud input) and
        ``highwordsfile`` is the path of the written TSV report
        (word, occurrence count, TF-IDF weight).
    """
    base_path = os.path.abspath(os.path.dirname(__file__))
    # Strip ASCII letters/digits and common punctuation so that segmentation
    # and keyword extraction only see the meaningful (Chinese) content.
    text1 = re.sub(r"[A-Za-z0-9\[\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\'\:\;\'\,\[\]\.\<\>\/\?\~\。\@\#\\\&\*\%]", "", text)
    words = jieba.cut(text1, cut_all=False)
    # TF-IDF keyword extraction: once as a plain keyword list, once with weights.
    wordfreq = jieba.analyse.extract_tags(text1, topK=maxwords, withWeight=False)
    wordweight = jieba.analyse.extract_tags(text1, topK=maxwords, withWeight=True)
    wt = "/".join(wordfreq)
    # Join the segmented words with "/" so keyword occurrences can be counted
    # by substring search below.
    words_all = "/".join(words)
    # Occurrence count of each extracted keyword in the segmented text.
    highwords = {word: words_all.count(word) for word in wordfreq}
    highwordsfile = base_path + "/static/highwords/" + str(projectID) + ".txt"
    # Write a tab-separated report: keyword, occurrence count, TF-IDF weight.
    # The with-statement closes the file; no explicit close() needed.
    with open(highwordsfile, "w", encoding="utf-8") as f:
        f.write("word" + "\t" + "次数" + "\t" + "权重" + "\n")
        for word, weight in wordweight:
            f.write(word + "\t" + str(highwords[word]) + "\t" + str(weight) + "\n")
    return (wt, highwordsfile)


# wt = keyword("合法空间的哈哈哈哈杀杀杀啊啊啊啊帆帆帆帆",3,1)[0]
def WC(wt, font, shape, maxwords, projectID):
    """Render a word-cloud image from a "/"-joined keyword string.

    Args:
        wt: Keyword text (e.g. the first element returned by ``keyword``).
        font: Path to a font file able to render the words (e.g. simsun.ttc).
        shape: Path to a mask image; the cloud fills its shape.
        maxwords: Maximum number of words shown in the cloud.
        projectID: Identifier used to name the output image file.

    Returns:
        Path of the generated JPEG image under static/wordcloud/.
    """
    base_path = os.path.abspath(os.path.dirname(__file__))
    img = plt.imread(shape)  # mask array controlling the cloud's outline
    sw = set(STOPWORDS)
    # random_state is fixed so repeated runs produce the same layout.
    wc = WordCloud(scale=6, background_color="white", max_words=maxwords, font_path=font, max_font_size=80,
                   stopwords=sw, mask=img, random_state=30)
    wordcloud = wc.generate(wt)
    wordcloudPath = base_path + "/static/wordcloud/" + str(projectID) + ".jpg"
    wordcloud.to_file(wordcloudPath)
    return wordcloudPath
# print(WC(wt,"simsun.ttc",base_path+"/static/images/2.JPG",10,1))
