from parse import NewsParse
import requests
from bs4 import BeautifulSoup
import json
import time
import re
from bs4 import element
import datetime


# from urllib.parse import quote

def getNetEaseNewsDetail(url):
    """Fetch a NetEase article page and extract its title, source and body HTML.

    :param url: article detail page URL; anything not containing '.html'
                is rejected up front as a non-article link
    :return: dict with keys ``docurl``/``title``/``sourcefrom``/``content``,
             or ``None`` when the URL is not an article page or any step of
             the fetch/parse fails
    """
    if '.html' not in url:
        return None
    newsDetail = {}
    try:
        # Timeout so one stalled connection cannot hang the crawl loop forever.
        res = requests.get(url, timeout=10)
        soup = BeautifulSoup(res.text, 'html5lib')
        newsDetail['docurl'] = url
        newsDetail['title'] = soup.select('.post_content_main h1')[0].text
        newsDetail['sourcefrom'] = soup.select('#ne_article_source')[0].text
        pcontent = ''
        for div in soup.select('#endText'):
            for item in div.contents:
                # Convert once per item instead of once per branch test.
                html = str(item)
                is_tag = not isinstance(item, element.NavigableString)
                # Skip ad blocks, <pre> dumps, special wrappers, source
                # footers and the "牛股王" promo image entirely.
                if is_tag and len(item.select('.ad_hover_href')) > 0:
                    continue
                elif ('<pre' in html and '</pre>' in html) or ('special_tag_wrap' in html):
                    continue
                elif 'AD200x300' in html or 'ep-source' in html:
                    continue
                elif 'alt="牛股王"' in html:
                    continue
                elif is_tag and len(item.select('.video')) > 0:
                    pcontent += getNotStyleString(html)
                elif is_tag and len(item.select('img')) > 0:
                    pcontent += getMatchString(html)
                else:
                    pcontent += getNotStyleString(html)
        newsDetail['content'] = pcontent
        print('正常: ', newsDetail['title'])
    except Exception as e:
        # Any network/parse failure downgrades to "no article" for the caller.
        print('!有异常', e)
        newsDetail = None
    return newsDetail


def getNotStyleString(pcontent):
    """Strip inline CSS noise from an HTML fragment.

    Removes every ``height:...px;`` declaration and every ``style="..."`` /
    ``style='...'`` attribute. The previous search-and-replace only removed
    the first distinct match of each pattern, leaving later occurrences with
    different values untouched; ``re.sub`` removes them all.

    :param pcontent: HTML fragment as a string
    :return: the fragment with inline styling removed
    """
    # 过滤height — non-greedy so one declaration cannot swallow unrelated
    # text up to a later "px;" elsewhere in the fragment.
    pcontent = re.sub(r"height:(.+?)px;", "", pcontent)
    # 过滤style标签 (double- then single-quoted forms)
    pcontent = re.sub(r"style=\"(.+?)\"", "", pcontent)
    pcontent = re.sub(r"style='(.+?)'", "", pcontent)
    return pcontent


def getMatchString(pcontent):
    """Strip sizing attributes and inline styles from an HTML img/video fragment.

    Removes every ``height="..."``, ``width="..."``, ``style="..."`` and
    ``style='...'`` attribute. The previous implementation only removed the
    first distinct match of each pattern; ``re.sub`` removes all of them.

    :param pcontent: HTML fragment as a string
    :return: the fragment with sizing/styling attributes removed
    """
    # 过滤height / width / style (both quote styles), all occurrences.
    for pattern in (r"height=\"(.+?)\"", r"width=\"(.+?)\"",
                    r"style=\"(.+?)\"", r"style='(.+?)'"):
        pcontent = re.sub(pattern, "", pcontent)
    return pcontent


def parseNeteaseNewsPage(pageurl):
    '''
    Fetch a NetEase JSONP news-list endpoint and decode its payload.

    :param pageurl: 新闻列表页面 (JSONP endpoint wrapped in ``data_callback(...)``)
    :return: 返回三个值，第一个是JSON字符串，第二个是这个字符串里面对应文章详情页面的链接的key值,第三个是封面图标
    '''
    res = requests.get(pageurl, timeout=10)
    jd = None
    try:
        text = res.text
        # Unwrap the JSONP envelope by position. The old
        # lstrip('data_callback(') stripped a *character set*, not a prefix,
        # and rstrip(')') broke on any trailing ';' or whitespace.
        start = text.find('(')
        end = text.rfind(')')
        if start != -1 and end > start:
            text = text[start + 1:end]
        jd = json.loads(text, strict=False)
    except Exception:
        # Narrowed from a bare except: keep best-effort behavior (jd stays
        # None) without swallowing KeyboardInterrupt/SystemExit.
        print('json数据有异常。。。。。。。。')
    return jd, 'docurl', 'imgurl'


# Wire the generic crawler to the NetEase-specific list/detail parsers.
parse = NewsParse(parseNeteaseNewsPage, getNetEaseNewsDetail)

# Channel slugs to crawl (NOTE: despite the name, this is a tuple, not a dict).
netease_page_dict = (
    'yaowen', 'guonei', 'war', 'money', 'shehui', 'guoji', 'tech', 'sports', 'ent', 'auto', 'jiankang')
# Slug -> human-readable classification label stored with each article.
netease_page_classify = {'yaowen': '要闻', 'guonei': '国内', 'war': '军事', 'money': '财经', 'shehui': '社会', 'guoji': '国际',
                         'tech': '科技', 'sports': '体育', 'ent': '娱乐', 'auto': '汽车', 'jiankang': '健康'}

# Poll every channel forever, inserting only articles not already in the DB,
# then sleep ten minutes between sweeps.
while True:
    for page in netease_page_dict:
        classify = netease_page_classify[page]
        print("================================当前解析的是 {}".format(classify), '页面======================》')
        pageurl = 'http://temp.163.com/special/00804KVA/cm_{}.js?callback=data_callback'.format(page)
        # Guard against a failed fetch returning None so the for-loop below
        # cannot crash the whole crawler.
        news_total = parse.parseJsonPageLinks(pageurl) or []
        count = 0
        for news in news_total:
            count += 1
            # Query the DB once per article (previously queried twice).
            existing = parse.findNewsData(news['docurl'])
            if existing is None or len(existing) <= 0:
                news['classify'] = classify
                parse.insertNewsData(news)
                print("__插入:", news['title'])
            else:
                print("__数据库中已有，不再插入___", count)
    dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print('先睡个十分钟，然后再看看有没有新的新闻出现', dt)
    time.sleep(60 * 10)

print('解析新闻结束！！！')  # NOTE: unreachable — the while loop above never exits
