import os

import jieba
import jieba.analyse
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from wordcloud import WordCloud
# Load the labelled news dataset (tab-separated: category, theme, URL, content).
# Raw string avoids the stray escape sequences (\A, \P, \d) the original
# non-raw path relied on surviving.
df_news = pd.read_table(
    r'C:\Users\Administrator\PycharmProjects\pythonProject2\data.txt',
    names=['category', 'theme', 'URL', 'content'],
    encoding='utf-8',
)
# BUG FIX: the original assigned the cleaned frame to a typo'd name
# (`def_news`) and kept processing the un-cleaned `df_news` afterwards.
# Keep the dropna() result so downstream steps see no NaN rows.
df_news = df_news.dropna()
print(df_news.tail())
print(df_news.shape)
content = df_news.content.values.tolist()
print(content[4399])

# Segment every article into tokens with jieba.
content_S = []
for page in content:
    current_segment = jieba.lcut(page)
    # BUG FIX: the original tested `current_segment != '\r\n'`, comparing a
    # LIST against a string — always True.  The intent (presumably — TODO
    # confirm against the data file) is to skip pages that are just a blank
    # line, so compare the raw page text instead.
    if len(current_segment) > 1 and page != '\r\n':
        content_S.append(current_segment)
print(content_S[4399])
df_content = pd.DataFrame({'content_S': content_S})
print(df_content.head())

# Stop-word list: one word per line; quoting=3 (csv.QUOTE_NONE) so quote
# characters inside words are taken literally.
stopwords = pd.read_csv('stopwords.txt', index_col=False, sep='\t',
                        quoting=3, names=['stopword'], encoding='utf-8')
print(stopwords.head(10))

def drop_stopwords(contents, stopwords):
    """Remove stop words from segmented documents.

    Args:
        contents: list of documents, each a list of tokens.
        stopwords: iterable of stop words to discard.

    Returns:
        A ``(contents_clean, all_words)`` tuple: the cleaned documents
        (same outer order and length as ``contents``) and a flat list of
        every surviving token converted to ``str``.
    """
    # Membership tests against a list are O(len(stopwords)) per token;
    # building a set once makes the whole pass O(total tokens).
    stopword_set = set(stopwords)
    contents_clean = []
    all_words = []
    for line in contents:
        line_clean = [word for word in line if word not in stopword_set]
        contents_clean.append(line_clean)
        all_words.extend(str(word) for word in line_clean)
    return contents_clean, all_words
# Flatten the DataFrame columns back to plain Python lists for filtering.
contents = df_content['content_S'].tolist()
stopwords = stopwords['stopword'].tolist()

# Strip stop words; keep both the cleaned documents and the flat token list.
contents_clean, all_words = drop_stopwords(contents, stopwords)

df_content = pd.DataFrame({'contents_clean': contents_clean})
print(df_content.tail())

df_all_words = pd.DataFrame({'all_words': all_words})
print(df_all_words.tail())

# Render a word cloud of the 100 most frequent tokens.
matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)
word_freq = df_all_words.all_words.value_counts()[:100]
wordcloud = WordCloud(
    # Raw string: the original non-raw path depended on \A, \P, \d, \s
    # surviving as literal backslashes.
    font_path=r'C:\Users\Administrator\PycharmProjects\pythonProject2\data\simhei.ttf',
    background_color='white',
    max_font_size=80,
)
# BUG FIX: fit_words expects a {word: frequency} dict; passing the pandas
# Series directly raises on recent wordcloud versions.
wordcloud = wordcloud.fit_words(word_freq.to_dict())
plt.imshow(wordcloud)
plt.axis('off')
plt.show()