import string
from collections import Counter

import matplotlib.pyplot as plt  # inline display of the rendered cloud
from wordcloud import WordCloud, STOPWORDS  # word-cloud generator and built-in stop words

# Mapper step of the word-frequency pipeline: file -> list of kept words.
def mapper(file):
    """Read a whitespace-segmented UTF-8 text file and return the kept words.

    Filters out punctuation tokens, Chinese stop words, ASCII letters/digits,
    single characters, pure-ASCII tokens, LaTeX commands/comments, and
    commentary (脂批) enclosed in ``{ ... }`` braces.

    Args:
        file: Path to a UTF-8 text file whose words are separated by spaces.

    Returns:
        list[str]: words surviving all filters, in document order.
    """
    word_sep = []
    punctuation = ['？','！','，','。','；','：','“','”','’','‘','\n','\u3000','(',')','、']
    stopwords = ["之","的","一","他","她","我","我们","可以","你","里","去",
                 "来","那","在","上","下","了","又","是","这","着","也","人",
                 '不','有','一个','一面','什么','无非','不是','那里','这么','你们','众人','回来','过去','在家','这里','自己','看见','二人','出来','前来','起来','想起','三四','一旁',
                 '一时','东西','怎么','话说','只得','不便','可巧','前儿','却说','然后','谁知','近日','回家','说道','遇见']
    # All ASCII letters and digits also act as stop words.
    stopwords += list(string.ascii_letters) + list(string.digits)
    with open(file, 'r', encoding="utf-8") as f:
        text = f.readlines()
    count = 0
    for para in text:
        para0 = para.split("\n")
        jump = 0        # >0 while inside a { ... } commentary span
        comment = False # True after a '%' token (LaTeX comment) on this line
        for para1 in para0:
            # BUG FIX: split the current sub-line (para1), not the whole
            # paragraph (para). The original split `para` inside this loop,
            # double-counting every word and dropping the last word of each
            # line (it retained its trailing "\n" and hit the multiline skip).
            words = para1.split(" ")
            for word in words:
                # Skip commentary (脂批) delimited by brace tokens.
                if "{" in word:
                    jump += 1
                    continue
                if "}" in word:
                    jump -= 1
                    continue
                if jump > 0:
                    continue
                # A LaTeX command token ends any running comment span.
                if "\\" in word:
                    comment = False
                    continue
                # A '%' token starts a LaTeX comment; skip what follows.
                if "%" in word:
                    comment = True
                    continue
                if comment:
                    print(f"comment of latex : {word}")
                    continue
                if word in punctuation: continue    # drop punctuation tokens
                if word in stopwords: continue      # drop stop words
                if len(word) < 2: continue          # drop single characters
                # Drop tokens consisting solely of ASCII characters.
                if all(0x0000 <= ord(char) <= 0x007F for char in word):
                    print(f"char {word}")
                    continue
                if ":" in word: continue
                if "：" in word: continue
                if "。" in word: continue
                if "\r" in word or "\n" in word:
                    print("multi line? will be ignored")
                    print(f"multiline {word}")
                    continue

                count += 1
                if count < 100:
                    print(f"word {word}")

                word_sep.append(word)  # keep this word
    return word_sep

# Reducer step: word list -> frequency dict, sorted by descending count.
def reducer(word_dict):
    """Count word frequencies and return them sorted by descending count.

    Replaces the hand-rolled count-then-sort loop with
    ``collections.Counter.most_common()``, which is equivalent: both sorts
    are stable, so ties keep first-seen order.

    Args:
        word_dict: iterable of words (e.g. the list produced by ``mapper``).

    Returns:
        dict[str, int]: word -> occurrence count, highest counts first.
    """
    return dict(Counter(word_dict).most_common())
        
# Script entry point: render a word cloud for one segmented chapter.
if __name__ == "__main__":
    file = r'D:\code\HongLouMeng-ZhiPingBen\tex_src\book\seg\chapter08_seg.tex'
    # Map the chapter to a word list, then reduce it to frequency counts.
    word_freq = reducer(mapper(file))
    cloud = WordCloud(background_color="white", width=600, height=400, margin=5,
                      font_path="C:/Windows/Fonts/simsun.ttc")
    cloud.generate_from_frequencies(word_freq)
    plt.imshow(cloud)
    plt.axis("off")
    plt.show()