from parse import NewsParse
import requests
from bs4 import BeautifulSoup
import time
import datetime

# from urllib.parse import quote


def parseZakerNewsDetail(url):
    """Fetch a Zaker news detail page and extract title, source and body HTML.

    Args:
        url: Detail-page URL. URLs containing '.html' are skipped — they use a
             different page template than the '#news_template_03_*' ids parsed
             here.

    Returns:
        A dict with keys 'docurl', 'title', 'sourcefrom', 'content', or
        None when the URL is skipped or any fetch/parse step fails.
    """
    if '.html' in url:
        return None
    newsDetail = {}
    try:
        # timeout so a stalled server cannot hang the crawler forever
        res = requests.get(url, timeout=10)
        res.encoding = 'utf-8'
        soul = BeautifulSoup(res.text, 'html5lib')
        newsDetail['docurl'] = url
        newsDetail['title'] = soul.select('#news_template_03_titleContent')[0].text
        # the <span> inside the author/time node holds the extra text (time);
        # strip it so only the source name remains
        extra = soul.select('#news_template_03_AuthorAndTime > span')[0].text
        newsDetail['sourcefrom'] = soul.select('#news_template_03_AuthorAndTime')[0].text.replace(extra, '')
        # collect fragments and join once instead of quadratic '+=' growth
        parts = []
        # [:-1] drops the last child node of the content container
        for it in soul.select("#content_text #content")[0].contents[:-1]:
            temp = str(it)
            try:
                if len(it.select('img')) > 0:
                    # images are lazy-loaded: the real URL sits in data-original
                    temp = str(it).replace('data-original', 'src')
                elif len(it.select('iframe')) > 0:
                    # embedded videos use protocol-relative src; force http
                    temp = str(it).replace('src="', 'src="http:')
            except AttributeError:
                # plain NavigableString children have no .select(); keep raw text
                temp = str(it)
            parts.append(temp)
        newsDetail['content'] = ''.join(parts)
        print('正常: ', newsDetail['title'])
    except Exception:
        # top-level boundary for this page: any fetch/parse failure means
        # "no article", signalled to the caller via None
        print('!有异常')
        newsDetail = None
    return newsDetail


def parseHtmlPage(pageUrl):
    """Parse a Zaker channel listing page into a list of article stubs.

    Args:
        pageUrl: URL of the listing page.

    Returns:
        List of dicts with 'docurl' (absolute article URL) and 'imgurl'
        (absolute cover-image URL, '' when the article has no cover).
    """
    # timeout so a stalled server cannot hang the crawler forever
    res = requests.get(pageUrl, timeout=10)
    soul = BeautifulSoup(res.text, 'html5lib')
    itemsUrl = []
    for a in soul.select('#infinite_scroll > a'):
        item = {}
        # hrefs on the page are protocol-relative ("//...")
        item['docurl'] = 'http:' + a['href']
        covers = a.select('div.pic-cover')
        if len(covers) > 0:
            # Extract the URL from an inline style like
            # "background-image:url(//img...);".
            # BUG FIX: the original used lstrip('background-image:url(') /
            # rstrip(');'), but lstrip/rstrip strip *character sets*, not
            # prefixes/suffixes, and can eat leading/trailing URL characters.
            style = covers[0]['style']
            inner = style.partition('url(')[2].partition(')')[0]
            item['imgurl'] = 'http:' + inner
        else:
            item['imgurl'] = ''
        itemsUrl.append(item)
    return itemsUrl


# Build the crawler from the two parse callbacks and poll the Zaker
# hot-news channel (app_id=660) forever, inserting only articles that
# are not yet in the database.
parse = NewsParse(parseHtmlPage, parseZakerNewsDetail)
pageUrl = 'http://app.myzaker.com/index.php?app_id=660'
while True:
    news_total = parse.parseHtmlPageLinks(pageUrl)
    for news in news_total:
        # look the article up once instead of querying the DB twice,
        # and compare to None with 'is' rather than '=='
        existing = parse.findNewsData(news['docurl'])
        if existing is None or len(existing) == 0:
            news['classify'] = '热点'
            parse.insertNewsData(news)
            print("__插入:", news['title'])
        else:
            print("__数据库中已有，不再插入___")
    dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print('先睡个十分钟，然后再看看有没有新的新闻出现', dt)
    time.sleep(60 * 10)

# df = pandas.DataFrame(news_total)
# df.to_excel('zaker_news_hot.xlsx')
# print('爬取新闻完毕!!!!!')
