
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib as mpl
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['font.serif'] = ['SimHei']
mpl.rcParams['axes.unicode_minus'] = False
from wordcloud import WordCloud,STOPWORDS,ImageColorGenerator
import pandas as pd
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation

def plot_pie(df, groupby_name, groupby_sum_name, labels_name, fraces_name):
    """Draw an exploded pie chart of per-group sums and display it.

    Parameters
    ----------
    df : pandas.DataFrame
        Source data.
    groupby_name : str
        Column to group rows by.
    groupby_sum_name : str
        Column whose values are summed within each group.
    labels_name : str
        Column of the grouped result used for slice labels.
    fraces_name : str
        Column of the grouped result used for slice sizes.

    Side effects: opens a matplotlib window via ``plt.show()``.
    """
    df_group = df.groupby(groupby_name)[groupby_sum_name].sum().reset_index()
    labels = df_group[labels_name].to_list()
    fraces = df_group[fraces_name].to_list()
    # One evenly-spaced rainbow color per slice.
    colors = cm.rainbow(np.arange(len(labels)) / len(labels))
    # Same offset for every slice; idiomatic replacement for the original
    # append-in-a-range-loop construction.
    explode = [0.1] * len(labels)
    plt.axes(aspect=1)  # 1:1 aspect so the pie is circular
    plt.pie(x=fraces, labels=labels, autopct='%1.1f%%', colors=colors,
            explode=explode, shadow=True)
    plt.show()

def WordsCloud(df, cloumn_name, image_path, font_path):
    """Build and display a word cloud from one text column of *df*.

    Parameters
    ----------
    df : pandas.DataFrame
        Source data.
    cloumn_name : str
        Name of the text column (parameter keeps the original spelling
        for caller compatibility).
    image_path : str
        Path to the mask image that shapes the cloud.
    font_path : str
        Path to a font file able to render the text (e.g. a CJK font).

    Side effects: opens a matplotlib window via ``plt.show()``.
    """
    raw_text = ','.join(df[cloumn_name].to_list())
    # jieba segments the (Chinese) text; WordCloud expects space-separated tokens.
    tokens = " ".join(jieba.cut(raw_text))
    mask_img = plt.imread(image_path)
    cloud = WordCloud(
        width=1024,
        height=178,
        background_color='white',
        mask=mask_img,
        stopwords=STOPWORDS.copy(),
        max_font_size=400,
        random_state=50,
        font_path=font_path,
    ).generate(tokens)
    plt.imshow(cloud)
    plt.axis("off")
    plt.show()

# Load the raw bibliometric data; columns used below: 数据库 (database),
# 下载次数 (download count), 论文名 (paper title).
dsj_df = pd.read_csv('俄国文学.csv', encoding='utf-8')

# Pie chart: each database's share of the total download count.
plot_pie(df=dsj_df, groupby_name='数据库', groupby_sum_name='下载次数', labels_name='数据库', fraces_name='下载次数')

# Drop exact duplicate rows before building the word cloud of paper titles.
result = dsj_df.drop_duplicates()
WordsCloud(df=result, cloumn_name='论文名', image_path='1.png',font_path='simkai.ttf')


# jieba.load_userdict('last_new_cutwords.txt')
# data_cut=result['论文名'].apply(lambda x:jieba.lcut(x))
# stopWords=pd.read_csv('stop_words',encoding='utf-8',sep='sagdshewgwees',engine='python')
# stopWords=list(stopWords.iloc[:,0])
# data_after=data_cut.apply(lambda x:[i for i in x if i not in stopWords])
# adata=data_after.apply(lambda x:' '.join(x))
#
# n_features = 1000
# tf_vectorizer = CountVectorizer(strip_accents = 'unicode',
#                                 max_features=n_features,
#                                 max_df=0.6)
# tf = tf_vectorizer.fit_transform(adata)
#
#
# lda = LatentDirichletAllocation(n_components=1, max_iter=50,
#                                 learning_method='online',
#                                 learning_offset=50.,
#                                 random_state=0)
# lda.fit(tf)
#
#
# n_top_words = 5
#
# tf_feature_names = tf_vectorizer.get_feature_names()
#
#
# for topic_idx, topic in enumerate(lda.components_):
#     words_topN=[tf_feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]
#     print(words_topN)
# NOTE(review): pandas is already imported at the top of the file, and the
# same CSV is re-read here -- this section could reuse dsj_df instead.
import pandas as pd
data = pd.read_csv('俄国文学.csv')
# print(data["下载次数"].value_counts()[0:10])
# df= pd.DataFrame
# df.sort_values(by='下载次数',ascending=False)
# print(data["论文名"].data["下载次数"].value_counts()[0:10])
# print(data["City"][data["Country"] == "CN"].value_counts()[0:10])
# Print the 10 rows with the highest download counts.
df1 = data.sort_values(by='下载次数',ascending = False)
print(df1[0:10])



