from tool import dbconfig,Db,get_html,mylog
from pyquery import PyQuery as pq
from urllib.parse import urljoin
import time
import random
from pymysql.converters import escape_string
#
# 视频采集
#
# Listing pages to scrape, as (listing_url, category_id) pairs.
# NOTE(review): paths look like a football section (zqqx -> 117) and a
# basketball-stars section (lanqiustar -> 118) — confirm category mapping.
urls = [
    ('https://www.98zb.com/zqqx/',117),
    ('https://www.98zb.com/lanqiustar/',118)
]

#pages = [(url , category_id , title , time)]
def content_page(base_url):
    """Collect article links from every listing page in ``urls``.

    Each listing page is fetched (site serves gb2312), parsed with pyquery,
    and every ``li`` item is turned into a tuple.

    :param base_url: base used to resolve relative article hrefs
    :return: list of (content_url, category_id, title, unix_time) tuples
    """
    pages = []
    for list_url, category_id in urls:
        html = get_html(list_url, encoding='gb2312')
        # polite crawl delay so we do not hammer the site
        time.sleep(random.randint(1, 3))
        doc = pq(html)
        for item in doc('.list_body_bd li').items():
            content_url = urljoin(base_url, item.find('a').attr('href'))
            # the site prints 2-digit years ("23年01月02日"); prefix the century
            time_str = "20" + item.find('.float_right').text()
            try:
                # time.mktime always returns a float, never None, so the
                # original `!= None` check was dead code — convert directly.
                unix_time = int(time.mktime(time.strptime(time_str, '%Y年%m月%d日')))
            except ValueError:
                # malformed/missing date on the listing page: skip this item
                # instead of aborting the whole run
                continue
            pages.append((content_url, category_id, item.find('a').text(), unix_time))
    return pages

def page_item_exist(pages, db):
    """Filter out pages whose title already exists in ``mf_article_doc``.

    :param pages: list of (url, category_id, title, unix_time) tuples
    :param db: Db instance providing ``getdata(sql)`` returning matched rows
    :return: the subset of ``pages`` not yet stored in the database
    """
    clear_pages = []
    for page in pages:
        # Escape backslashes and double quotes so a scraped title cannot
        # break out of the quoted SQL literal (basic injection hardening;
        # ideally Db would support parameterized queries).
        title = page[2].replace('\\', '\\\\').replace('"', '\\"')
        sql = 'select id from mf_article_doc where title="%s"' % title
        if not db.getdata(sql):
            clear_pages.append(page)
    return clear_pages

def parse_content_page(pages, base_url):
    """Fetch each article page, clean its body and store it via insert_content.

    NOTE(review): this function reads the module-level ``db`` created in
    ``__main__`` — consider passing it in explicitly.

    :param pages: list of (url, category_id, title, unix_time) tuples
    :param base_url: unused here; kept for signature compatibility
    """
    for page in pages:
        html = get_html(page[0], encoding='gb2312')
        time.sleep(random.randint(1, 3))  # polite crawl delay
        if html == '':
            continue
        doc = pq(html)
        title = page[2]
        pic = ""
        # keywords come from the article's tag links
        kw = ",".join(k.text().strip() for k in doc(".fav a").items())
        content_html = doc(".Content-body")
        # strip internal / self-referencing links from the article body
        for at in content_html("a").items():
            href = at.attr("href")
            # href is None for <a> tags without an href attribute — the old
            # code crashed on those with AttributeError
            if href and (href.startswith("/") or href.find("98zhibo.com") > 0):
                at.remove()
        content = content_html.html(method='html')
        if content is None:
            # page has no .Content-body element — nothing to store;
            # the old code crashed inside escape_string(None)
            continue
        content = escape_string(content)
        content_data = (page[1], title, kw, content, pic, page[3])
        insert_content(content_data, db)
        mylog('视频---%s--插入文章成功' % content_data[1], 'shipin.log')

def insert_content(content_data, db):
    """Insert one article row into ``mf_article_doc``.

    :param content_data: (category_id, title, kw, content, pic, create_time);
        ``content`` is expected to already be escaped by the caller (see
        the escape_string call in parse_content_page).
    :param db: Db instance providing ``exe_sql(sql)``
    """
    category_id, title, kw, content, pic, create_time = content_data

    def _esc(value):
        # Escape backslashes and double quotes so scraped text cannot break
        # out of the quoted SQL literal (basic injection hardening).
        return str(value).replace('\\', '\\\\').replace('"', '\\"')

    sql = ('insert into mf_article_doc(category_id , title , kw , content , pic , create_time)'
           ' values("%s","%s","%s","%s","%s","%d")'
           % (category_id, _esc(title), _esc(kw), content, _esc(pic), create_time))
    db.exe_sql(sql)

if __name__ == '__main__':
    # Script entry point: open the DB, scrape listing pages, filter out
    # already-stored articles, then fetch and insert the new ones.
    db = None
    try:
        db = Db(dbconfig)
        print("采集开始")
        pages = content_page('https://www.98zb.com/')
        clear_pages = page_item_exist(pages, db)
        mylog('视频-----文章数量:%d,需采集:%d' % (len(pages), len(clear_pages)), 'shipin.log')
        parse_content_page(clear_pages, 'https://www.98zb.com/')
        print("采集结束")
    except Exception as e:
        print(e)
        # mylog writes to a log file; pass a formatted string, not the
        # exception object itself
        mylog(str(e), 'shipin.log')
    finally:
        # always release the DB connection, even when scraping fails —
        # the original only closed it on the success path
        if db is not None:
            db.close()