import newspaper
from newspaper import fulltext
from newspaper import Article
from newspaper import news_pool
import requests
# 词频统计库
import collections
import numpy as np
import jieba
# 词云展示库
import wordcloud
# 图像处理库
from PIL import Image
# 图像展示库
import matplotlib.pyplot as plt
'''
newspaper 框架是一个主要用来提取新闻内容及分析的 Python 爬虫框架，更确切的说，newspaper 是一个 Python 库，但这个库由第三方开发
比较简洁
速度较快
支持多线程
支持多语言
'''
# 1.查看支持语言
# print(newspaper.languages())
# 2.环球网 默认情况下，newspaper 缓存所有以前提取的文章，并删除它已经提取的任何文章，使用 memoize_articles 参数选择退出此功能
# hq_paper = newspaper.build("https://tech.huanqiu.com/", language="zh", memoize_articles=False)
# 3.获取文章url
# hq_paper = newspaper.build("https://tech.huanqiu.com/", language="zh", memoize_articles=False)
# for article in hq_paper.articles:
#     print(article.url)
# 4.获取类别
# hq_paper = newspaper.build("https://tech.huanqiu.com/", language="zh", memoize_articles=False)
# for category in hq_paper.category_urls():
#     print(category)
# 5.获取品牌和描述
# hq_paper = newspaper.build("https://tech.huanqiu.com/", language="zh", memoize_articles=False)
# print(hq_paper.brand)  # 品牌
# print(hq_paper.description)  #  描述
# 6.TODO 下载解析
# hq_paper = newspaper.build("https://tech.huanqiu.com/", language="zh", memoize_articles=False)
# article = hq_paper.articles[4]
# # 下载
# article.download()
# # 解析
# article.parse()
# # 获取文章标题
# print("title=", article.title)
# # 获取文章日期
# print("publish_date=", article.publish_date)
# # 获取文章作者
# print("author=", article.authors)
# # 获取文章顶部图片地址
# print("top_image=", article.top_image)
# # 获取文章视频链接
# print("movies=", article.movies)
# # 获取文章摘要
# print("summary=", article.summary)
# # 获取文章正文
# print("text=", article.text)

# 7.Article 类使用
# article = Article('https://money.163.com/19/1130/08/EV7HD86300258105.html')
# article.download()
# article.parse()
# print("title=", article.title)
# print("author=", article.authors)
# print("publish_date=", article.publish_date)
# print("top_image=", article.top_image)
# print("movies=", article.movies)
# print("text=", article.text)
# print("summary=", article.summary)

# 8.解析 html
# html = requests.get('https://money.163.com/19/1130/08/EV7HD86300258105.html').text
# print('获取原信息：', html)
# text = fulltext(html, language='zh')
# print('解析后的信息', text)

# 9.nlp（自然语言处理）
# article = Article('https://money.163.com/19/1130/08/EV7HD86300258105.html')
# article.download()
# article.parse()
# print('解析前：', article.keywords)
# # nlp 处理
# article.nlp()
# print('处理后-->', article.keywords)

# 10.多任务 注意：获取内容太多解析很慢 如果需要快速得到结果自行在做很细的处理
# hq_paper = newspaper.build('https://www.huanqiu.com', language="zh")
# sh_paper1 = newspaper.build('http://news.sohu.com', language="zh")
# sn_paper2 = newspaper.build('https://news.sina.com.cn', language="zh")
#
# papers = [hq_paper, sh_paper1, sn_paper2]
# # 线程数为 3 * 2 = 6
# news_pool.set(papers, threads_per_source=2)
# news_pool.join()
# # print(hq_paper.articles[0].html) # 内容很多无法在控制台显示
# print(hq_paper.description) # 和article对象操作一样

# TODO 词云实现
# 文章获取及处理
# 获取文章
# --- Fetch the article and compute word frequencies ---
# Fetch the article page
article = newspaper.Article('https://news.sina.com.cn/o/2019-11-28/doc-iihnzahi3991780.shtml')
# Download the raw HTML (network I/O)
article.download()
# Parse title / text / metadata out of the HTML
article.parse()
# Run newspaper's NLP pass; this populates article.keywords
article.nlp()
# Join the extracted keywords with spaces.
# BUG FIX: the original used "".join(...), which glued all keywords into
# one unbroken string and made jieba mis-segment across keyword boundaries.
article_words = " ".join(article.keywords)
# Precise-mode segmentation (jieba's default mode)
seg_list_exact = jieba.cut(article_words, cut_all=False)
# Tokens to drop: a few stop words, plus the space separators that jieba
# returns as standalone tokens. A set gives O(1) membership tests.
rm_words = {'迎', '以来', '将', ' '}
# Keep every segmented token that is not filtered out
object_list = [word for word in seg_list_exact if word not in rm_words]
# Word-frequency statistics
word_counts = collections.Counter(object_list)
# The 10 most frequent words
word_top10 = word_counts.most_common(10)
# Print each word with its count
for w, c in word_top10:
    print(w, c)

# --- Generate and display the word cloud ---
# Load the background image; it serves as both the shape mask and the
# colour source for the cloud.
mask = np.array(Image.open('bg.jpg'))
wc = wordcloud.WordCloud(
    # Font that contains CJK glyphs (SimHei) — required for Chinese text,
    # otherwise the words render as empty boxes.
    font_path='C:/Windows/Fonts/simhei.ttf',
    # Shape the cloud with the background image
    mask=mask,
    # Maximum number of words displayed
    max_words=100,
    # Maximum font size
    max_font_size=120
)
# Build the cloud from the frequency counter
wc.generate_from_frequencies(word_counts)
# Derive a colour scheme from the background image and APPLY it.
# BUG FIX: the original computed image_colors but never used it, so the
# background-based colouring was silently dropped.
image_colors = wordcloud.ImageColorGenerator(mask)
wc.recolor(color_func=image_colors)
# Render the word cloud
plt.imshow(wc)
# Hide the axes
plt.axis('off')
# Save before plt.show(): show() can leave an empty current figure in
# some backends, producing a blank wc.jpg if saved afterwards.
plt.savefig('wc.jpg')
# Display the figure window
plt.show()

# 更多函数操作：https://newspaper.readthedocs.io/en/latest/user_guide/quickstart.html#performing-nlp-on-an-article