#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2018/5/20 11:38
# @Author  : LinYimeng
# @File    : baidubeau.py
# @Software: PyCharm
import time
from bs4 import BeautifulSoup
import requests
from lxml import etree
import newspaper #获取文本 pip3 install newspaper3k
from snownlp import SnowNLP
import jieba
from jieba import analyse

def GetfromBaidu(word, page=10):
    """Search Baidu News for `word` and return the raw result elements.

    NOTE(review): this definition is shadowed by the identically-named
    function later in this file; that later copy is the one that actually
    runs. Kept here unchanged in interface, with the same fixes applied.

    word: search keyword (used as the `word=` query parameter).
    page: value for the `rn=` parameter, i.e. number of results per page.
    Returns a list of bs4 Tag objects, one per news result.
    """
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, compress',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4620.400 QQBrowser/9.7.13014.400'
    }
    url = 'http://news.baidu.com/ns?cl=2&rn={0}&tn=news&word={1}'.format(page, word)  # rn = items per page
    print("搜索网址：", url)
    # BUGFIX: requests.get() without a timeout can hang forever on a stalled
    # connection; bound the wait explicitly.
    html = requests.get(url, headers=headers, timeout=30).content
    soup = BeautifulSoup(html, 'html.parser')
    resultList = soup.find_all(class_='result')
    print("获取到的新闻条数：", len(resultList), resultList)
    return resultList  # list of bs4 elements, one per news item

def XpathBoxContent(eachput):
    """Parse one Baidu News search result into its individual fields.

    eachput: a single result item (bs4 element / raw html fragment).
    Returns (title, url, source, time, content); each field is '' when it
    cannot be found in the fragment.
    """
    # Defaults so a partially-parsed item still returns a complete tuple.
    # (pub_time avoids shadowing the imported `time` module.)
    title = ''; url = ''; source = ''; pub_time = ''; content = ''
    if eachput != "":
        node = etree.HTML(str(eachput))
        heads = node.xpath('//h3/a')
        if heads:
            info = heads[0].xpath('string(.)')  # flatten nested <em> markup into plain text
            title = info.replace('\n', '').replace(' ', '')  # title
        hrefs = node.xpath('//h3/a/@href')
        if hrefs:
            url = hrefs[0]  # link to the article
        # Baidu renders two layouts: with a thumbnail image and without.
        summary = node.xpath('//div[@class="c-summary c-row "]')
        box = node.xpath('//div[@class="c-summary c-row "]/p/text()')
        if summary == []:
            summary = node.xpath('//div[@class="c-summary c-row c-gap-top-small"]/div[@class ="c-span18 c-span-last"]')
            box = node.xpath('//div[@class="c-summary c-row c-gap-top-small"]/div[@class ="c-span18 c-span-last"]/p/text()')  # source + date, e.g. 腾讯科技  2013年12月03日 20:51
        # BUGFIX: guard against items where neither layout matched; the old
        # code indexed summary[0]/box[0] unconditionally and raised IndexError.
        if summary and box:
            text = summary[0].xpath('string(.)')  # nested tags flattened
            text = text.replace('\n', '')
            text = text.replace(box[0], '', 1)  # drop the source/date header repeated in the body
            # BUGFIX: str.strip(chars) removes a *character set* from both
            # ends, not a literal suffix; peel off Baidu's boilerplate tail
            # pieces explicitly instead.
            content = text.strip()
            for tail in ("百度快照", "-", "查看更多相关新闻>>"):
                if content.endswith(tail):
                    content = content[:-len(tail)].strip()
            source = box[0].split("\xa0")[0]    # e.g. '搜狐科技\xa0\xa02018年05月17日 08:48'
            pub_time = box[0].split("\xa0")[-1]
        print("题目:", title)
        print("链接:", url)
        print("==内容来源:", source)
        print("==时间:", pub_time)
        print("==内容提示;", content)
    return title, url, source, pub_time, content


def Article(url):
    """Download `url` with newspaper and return (text, keywords, summary).

    url: article link; callers should de-duplicate urls beforehand ('' allowed).
    Returns (article, sskey, ssummary); sentinel strings '空article' /
    '空key' / '空ummary' signal failure (urlExist() checks '空article').
    """
    article = '空article'  # sentinel defaults kept byte-identical: urlExist() compares against them
    sskey = '空key'
    ssummary = '空ummary'
    a = newspaper.Article(url, language='zh')
    try:
        a.download()
        a.parse()
    except Exception as e:
        print(Exception, e, url, "链接无效，无法获取正文")
    try:
        article = a.text.replace('\n', '').replace(' ', '')  # collapse newlines/spaces
        s = SnowNLP(article)
        sskey = s.keywords(3)    # list, e.g. [u'语言', u'自然', u'计算机']
        ssummary = s.summary(3)  # list of summary sentences
        print('keywords', sskey)
        print('summary', ssummary)
    # BUGFIX: only ZeroDivisionError was caught here, but when download/parse
    # failed above, `a.text` raises other exceptions (e.g. ArticleException)
    # that crashed the whole run; catch them all and fall back to sentinels.
    except Exception as E:
        print(Exception, E, url, "或因网页结构问题，无法获取正文")  # e.g. csdn pages cannot be parsed
    return article, sskey, ssummary
'''  暂时不需要的功能
# # 创建停用词list
# def stopwordslist(stopwords_patn):  #停用词表
#     file = open(stopwords_patn, 'r', encoding='UTF-8')
#     stopwords = [line.strip() for line in file.readlines()]
#     return stopwords


# def KeyWord(result) : #获取关键词  result[0] 分词后且去除停用词后的一个字符串
#     # tfidf = analyse.extract_tags
#     # for keyword in keywords:
#     #     print(keyword + "/")
#     # print("tfidf keyword end")
#     textrank = analyse.textrank
#     keywords = textrank(result[0])
#     # 输出抽取出的关键词
#     for keyword in keywords:
#         print (keyword)
#     print("textrank keyword end")

# def FenciandWordcount(artical):     #分词，去除停用词,获取统计词频。 artical一行原文文本 。包含句子和标点
#     result = list()
#     seg_list = jieba.cut(artical, cut_all=False)  # 精确模式 ,<class 'generator'>
#     outstr = ''
#     for word in seg_list:
#         if word not in stopwordslist(stopwords_patn):
#             if word != '\t':
#                 outstr += word
#                 outstr += " "
#     result.append(outstr.strip().replace('\n', '').replace('   ', ' ').split())   #一个列表，元素为一个个分割好的单个词
#      #词频统计
#     strl_ist =result
#     long =  len(strl_ist) #文章词数
#     print('strl_ist',strl_ist)
#     count_dict = {}
#     # 如果字典里有该单词则加1，否则添加入字典
#     for str in strl_ist:
#         if str in count_dict.keys():
#             count_dict[str] = count_dict[str] + 1
#         else:
#             count_dict[str] = 1
#     #按照词频从高到低排列
#     count_list=sorted(count_dict.items(),key=lambda x:x[1],reverse=True)
#     return long,count_list
'''
def urlExist(path, url, article):
    """Return 1 if `url` should be skipped (invalid or already seen), else 0.

    path: directory holding the url log file.
    url: article link ('' means the link was unusable).
    article: extracted text; '空article' is the Article() failure sentinel.
    Side effect: appends unseen urls (one per line) to <path>\\baiduUrl.txt.
    """
    IFexist = 0  # default: not seen yet
    if url == '' or article == '空article':  # dead link or no body extracted
        IFexist = 1
    else:
        # BUGFIX: __main__ creates baiduUrl.txt, but this used to open
        # "Url.txt", so every call raised FileNotFoundError under 'r+'.
        # Also fixed the trailing space in the encoding name.
        with open(path + "\\baiduUrl.txt", 'r+', encoding='utf-8') as urlFile:
            urlList = urlFile.readlines()
            if url + '\n' in urlList:
                IFexist = 1
            else:
                # 'r+' leaves the cursor at EOF after readlines(), so this appends.
                urlFile.write(url)
                urlFile.write('\n')
    return IFexist


def Cometrue(path, resultList):
    """Write one formatted record per new (unseen) news item to baiduAll.txt.

    path: output directory (files created by __main__).
    resultList: list of raw result elements from GetfromBaidu().
    Returns None.
    """
    for each in resultList:
        title, url, source, pub_time, content = XpathBoxContent(each)
        article, sskey, ssummary = Article(url)
        IFexist = urlExist(path, url, article)
        if IFexist == 0:
            # BUGFIX: used to open path+"All.txt" (missing path separator and
            # wrong name — __main__ creates <path>\baiduAll.txt) with the
            # malformed encoding string "'utf-8'".
            with open(path + "\\baiduAll.txt", 'a+', encoding='utf-8') as allFile:
                allFile.write('*****题目：' + title + '\n')
                allFile.write('链接：' + url + '\n')
                allFile.write('来源：' + source + '\n')
                allFile.write('时间：' + pub_time + '\n')
                allFile.write('关键词：' + " ;".join(sskey) + '\n')
                allFile.write('摘要：' + " ;".join(ssummary) + '\n')
                allFile.write('全文内容：' + article + '\n')
    return None

def GetfromBaidu(word, page=10):
    """Search Baidu News for `word` and return the raw result elements.

    word: search keyword (used as the `word=` query parameter).
    page: value for the `rn=` parameter, i.e. number of results per page
          (default 10; Baidu accepts up to 50).
    Returns a list of bs4 Tag objects, one per news result.

    NOTE(review): this redefines GetfromBaidu from earlier in the file and
    is the version that actually runs.
    """
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, compress',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4620.400 QQBrowser/9.7.13014.400'
    }
    # (removed the unused `list = []` local, which also shadowed the builtin)
    url = 'http://news.baidu.com/ns?cl=2&rn={0}&tn=news&word={1}'.format(page, word)  # rn = items per page
    print("搜索网址：", url)
    # BUGFIX: requests.get() without a timeout can hang forever on a stalled
    # connection; bound the wait explicitly.
    html = requests.get(url, headers=headers, timeout=30).content
    soup = BeautifulSoup(html, 'html.parser')
    resultList = soup.find_all(class_='result')
    print("获取到的新闻条数：", len(resultList))
    print('\n')
    return resultList  # list of bs4 elements, one per news item




if __name__ == '__main__':
    # Output directory for the url log and the full-record file.
    path = 'C:\\Users\\Administrator\\Desktop\\news'
    try:
        # Truncate/create both output files so a new search starts clean;
        # 'with' guarantees they are closed even on error.
        # (Fixed the trailing space in the encoding name.)
        with open(path + "\\baiduUrl.txt", 'w+', encoding='utf-8'), \
             open(path + "\\baiduAll.txt", 'w+', encoding='utf-8'):
            pass
        print("重新搜索将会覆盖原文件，现在改已生成的文件名还来得及。其中文件All包括所有信息。")
        searchWord = input("请输入要搜索的词：")
        # Empty input previously produced a malformed rn= parameter; fall
        # back to the advertised default of 10.
        mypage = input("需要的新闻条数，默认10条，可输入小于10,或10，20，30，40，50。") or 10
        print('开始运行喽，部分内容会在运行界面展示，具体内容请查看文件')
        print('\n')
        resultList = GetfromBaidu(searchWord, mypage)
        Cometrue(path, resultList)
    except FileNotFoundError as e:
        # Most likely the hard-coded output directory does not exist.
        print(Exception, e)
        print('哦哦，不改程序就运行喽，宝宝你还是改一下文件存放路径吧')



# page 暂时不需要
# #page= path.xpath('//*[@id="page"]/a[-1]/@href')[0]
#         if page != "":
#             pageUrl =baidNewuurl+page
#             html = requests.get(pageUrl.format(word), headers=headers)
#         else:
#             Next = False
#         xpathBoxContent(each)
#             try:
#                 title, content, source, time =  xpathBoxContent(each)
#                 list.append(title, content, source, time)
#                 print(title, content, source, time)
#                 #加入对象
#             except(IOError,ZeroDivisionError) as e:
#                 print(e)
#         page= path.xpath('//*[@id="page"]/a[-1]/@href')[0]
#         if page != "":
#             pageUrl =baidNewuurl+page
#             html = requests.get(pageUrl.format(word), headers=headers)
#         else:
#             Next = False