import pandas as pd
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import jieba.analyse
import matplotlib
import os

#
#
#
# Load the news dataset: tab-separated file with four columns.
# NOTE: raw string avoids the invalid escape sequences (\A, \P, \d) that the
# original non-raw literal relied on surviving by accident.
df_news = pd.read_table(r'C:\Users\Acer\PycharmProjects\pythonProject1\data\data.txt',
                        names=['category', 'theme', 'URL', 'content'], encoding='utf-8')
df_news = df_news.dropna()  # drop rows with any missing field

print(df_news.tail())
print(df_news.shape)
content = df_news.content.values.tolist()  # one string per article
print(content[4999])  # peek at the last article

# Segment every article into a token list with jieba.
content_S = []
for page in content:
    current_segment = jieba.lcut(page)  # word segmentation for one article
    # Skip articles that segment to a single token or to a lone newline.
    # (The original compared the token LIST to the string '\r\n', which is
    # always unequal — the newline filter never fired.)
    if len(current_segment) > 1 and current_segment != ['\r\n']:
        content_S.append(current_segment)
print(content_S[4999])

df_content = pd.DataFrame({'content_S': content_S})  # segmented corpus, one row per article
print(df_content.head())
# Stopword list: one word per line, tab-separated, quoting disabled.
stopwords = pd.read_csv("stopwords.txt", index_col=False, sep="\t",
                        quoting=3, names=['stopword'], encoding='utf-8')
print(stopwords.head(10))
#
#
def drop_stopwords(contents, stopwords):
    """Remove stopwords from every tokenized document.

    Args:
        contents: list of documents, each a list of tokens.
        stopwords: iterable of stopwords to discard.

    Returns:
        (contents_clean, all_words) where contents_clean mirrors the input
        structure with stopwords removed, and all_words is a flat list of
        every kept token (stringified) across all documents, in order.
    """
    # Build the set once: O(1) membership instead of an O(n) list scan
    # per token over the whole corpus.
    stopword_set = set(stopwords)
    contents_clean = []
    all_words = []
    for line in contents:
        line_clean = [word for word in line if word not in stopword_set]
        contents_clean.append(line_clean)
        all_words.extend(str(word) for word in line_clean)
    return contents_clean, all_words


# Strip stopwords from the segmented corpus and rebuild the display frames.
contents = df_content['content_S'].tolist()
stopwords = stopwords['stopword'].tolist()
contents_clean, all_words = drop_stopwords(contents, stopwords)

# Cleaned corpus, one token list per article.
df_content = pd.DataFrame({'contents_clean': contents_clean})
print(df_content.tail())

# Flat list of every surviving token (for frequency / word-cloud work below).
df_all_words = pd.DataFrame({'all_words': all_words})
print(df_all_words.tail())


# matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)

# woordcloud_data = df_all_words.all_words.value_counts()[:100]
# wordcloud = WordCloud(font_path="./data/simhei.ttf", background_color="white", max_font_size=80)
# wordcloud = wordcloud.fit_words(woordcloud_data)
# plt.imshow(wordcloud)
# plt.axis('off')
# plt.show()



# Spot-check TF-IDF keyword extraction on a single article.
index = 2400
article_text = "".join(content_S[index])
print(article_text)
top_keywords = jieba.analyse.extract_tags(article_text, topK=5, withWeight=False)
print("  ".join(top_keywords))

# Training frame: cleaned token lists paired with the original category label.
df_train = pd.DataFrame({'contents_clean': contents_clean, 'label': df_news['category']})
print(df_train.tail())

df_train.label.unique()
# Map each Chinese category name to a numeric class id.
label_mapping = {"汽车": 1, "财经": 2, "科技": 3, "健康": 4, "体育": 5,
                 "教育": 6, "文化": 7, "军事": 8, "娱乐": 9, "时尚": 0}
df_train['label'] = df_train['label'].map(label_mapping)
print(df_train.head())






from sklearn.model_selection import train_test_split

# Split cleaned documents and labels into train/test partitions (fixed seed).
x_train, x_test, y_train, y_test = train_test_split(
    df_train['contents_clean'].values, df_train['label'].values, random_state=1)
print(x_train[0][1])

# CountVectorizer wants one string per document: join each token list with spaces.
words = []
for line_index, doc in enumerate(x_train):
    try:
        words.append(' '.join(doc))
    except TypeError:
        # Narrowed from a bare `except:` — only a non-iterable / non-string row
        # can make the join fail; report its index instead of hiding all errors.
        print(line_index)

# print(len(words))
# from sklearn.feature_extraction.text import CountVectorizer
# texts=["dog cat fish","dog cat cat","fish bird",'bird']
# cv = CountVectorizer()
# cv_fit=cv.fit_transform(texts)
#
# print(cv.get_feature_names_out())
# print(cv_fit.toarray())
#
# print(cv_fit.toarray().sum(axis=0))


# texts=["dog cat fish","dog cat cat","fish bird", 'bird']
# cv = CountVectorizer(ngram_range=(1,4))#设置ngram参数，让结果不光包含一个词还有2，3个的组合
# cv_fit=cv.fit_transform(texts)
#
# print(cv.get_feature_names_out())
# print(cv_fit.toarray())

# FIX: CountVectorizer was only imported inside the commented-out demo above,
# so this line raised NameError at runtime — import it before use.
from sklearn.feature_extraction.text import CountVectorizer

# Bag-of-words features over the joined documents; lowercase disabled because
# the tokens are Chinese words where case folding is meaningless.
vec = CountVectorizer(analyzer='word', lowercase=False)
feature = vec.fit_transform(words)
print(feature.shape)