from parse import NewsParse
import requests
from bs4 import BeautifulSoup
import time
import datetime


def parseComicNewsDetail(url):
    """Fetch one comic-news article page and extract its fields.

    Args:
        url: absolute URL of the article page.

    Returns:
        dict with keys 'docurl', 'title', 'sourcefrom', 'content',
        or None when fetching or parsing fails.
    """
    newsDetail = {}
    try:
        res = requests.get(url)
        res.encoding = 'gbk'  # qq.com article pages are GBK-encoded
        soul = BeautifulSoup(res.text, 'html5lib')
        newsDetail['docurl'] = url
        newsDetail['title'] = soul.select('.hd h1')[0].text
        # extra = soul.select('#news_template_03_AuthorAndTime > span')[0].text
        newsDetail['sourcefrom'] = '国际新闻'
        # The article body lives under either a class or an id selector,
        # depending on the page template; try the class form first.
        paragraphs = soul.select(".Cnt-Main-Article-QQ p")
        if not paragraphs:
            paragraphs = soul.select("#Cnt-Main-Article-QQ p")
        # Keep the paragraph markup but drop paragraphs that embed <script>
        # tags; join once instead of repeated string concatenation.
        newsDetail['content'] = ''.join(
            str(p) for p in paragraphs if '<script' not in str(p)
        )
        print('正常: ', newsDetail['title'])
    except Exception as e:
        # Boundary handler: log the failure and signal it with None
        # so the caller can skip this article.
        print('!有异常', e)
        newsDetail = None
    return newsDetail


def parseHtmlPage(pageUrl):
    """Scrape a listing page and collect article link/thumbnail pairs.

    Args:
        pageUrl: URL of the comic-news listing page.

    Returns:
        list of dicts with keys 'docurl' (absolute article URL) and
        'imgurl' (thumbnail src, or '' when the entry has no image).
    """
    res = requests.get(pageUrl)
    soul = BeautifulSoup(res.text, 'html5lib')
    itemsUrl = []
    for div in soul.select('.tabclass .Q-tpList'):
        # The first anchor holds both the article href and the thumbnail;
        # look it up once instead of re-selecting it per field.
        anchor = div.select('a')[0]
        images = anchor.select('img')
        item = {
            'docurl': 'http://comic.qq.com' + anchor['href'],
            'imgurl': images[0]['src'] if images else '',
        }
        itemsUrl.append(item)
    return itemsUrl


# Crawl loop: fetch the listing page, insert any articles not yet in the
# database, then sleep ten minutes and repeat forever.
parse = NewsParse(parseHtmlPage, parseComicNewsDetail)
pageurl = "http://comic.qq.com/#tabs-1"
while True:
    news_total = parse.parseHtmlPageLinks(pageurl)
    for news in news_total:
        # Query the database once per article (the original looked it up
        # twice) and use identity comparison for None.
        existing = parse.findNewsData(news['docurl'])
        if existing is None or len(existing) <= 0:
            news['classify'] = '动漫'
            parse.insertNewsData(news)
            print("__插入:", news['title'])
        else:
            print("__数据库中已有，不再插入___")
    dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print('先睡个十分钟，然后再看看有没有新的新闻出现', dt)
    time.sleep(60 * 10)
