#coding:utf-8
'''
* coder  : dzlua
* email  : 505544956@qq.com
* module : spider
* path   : wxgzh
* file   : jmedia.py
* time   : 2017-12-26 10:44:48
'''
#--------------------#
import sys, os, codecs, re, time
sys.path.append('../module/')
#--------------------#
from spider import Spider
from bs4 import BeautifulSoup
#--------------------#

#--------------------#
# Scraper configuration: crawl root and destination table.
cfg = {
    # Entry point of the crawl: jingmeiti.com front page.
    'url': 'http://www.jingmeiti.com/',
    # MySQL table the scraped articles are written to.
    'tb': 'wxgzh_jmedia',
}
#--------------------#

#--------------------#
def info_run(sp, fun, totle, tp='begin', fenge=200000, step=10):
    """Drive *fun* over id windows of width *step* up to *totle*.

    When tp == 'begin', only windows strictly below the *fenge* cut-off
    are processed; for any other tp value, only windows at or above it.
    Each processed window triggers fun(sp, lower, upper) followed by
    sp.wait() so queued work drains between windows.
    """
    lower = 0
    head_half = (tp == 'begin')
    for upper in range(step, totle + step, step):
        if head_half:
            if upper >= fenge:
                break
        elif upper < fenge:
            # Still below the cut-off: skip, but keep the window aligned.
            lower = upper
            continue
        sp.prt('%s, %s' % (lower, upper))
        fun(sp, lower, upper)
        sp.wait()
        lower = upper
#--------------------#

#--------------------#
def save_data(sp, href):
    """Insert one article link into the table, skipping known hrefs.

    sp   : Spider instance (provides has_data / execute / logp / prt).
    href : article URL to record.

    Note: the "save done" message is also printed when the href already
    existed and no row was inserted (pre-existing behavior, kept as-is).
    """
    sql = "INSERT INTO " + cfg['tb'] + \
        " (name, href)" \
        " VALUES(%s,%s);"
    param = ( u'鲸媒体', href )
    try:
        # De-duplicate on href before inserting.
        if not sp.has_data(cfg['tb'], 'href', href):
            sp.execute(sql, param)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # can still propagate and stop the crawler.
        sp.logp('save_data : error save. %s' % (href))
        return
    sp.prt('save_data : save done. %s' % (href))
#--------------------#

#--------------------#
def save_info(sp, id, data):
    """Write the parsed article fields back onto an existing row.

    sp   : Spider instance (provides execute / logp / prt).
    id   : primary key of the row to update.
    data : dict with keys author / cat / title / date / abstract / text.
    """
    sql = 'UPDATE ' + cfg['tb'] + \
        ' SET auth=%s,type=%s,title=%s,date=%s,abstract=%s,info=%s WHERE id=%s;'
    # Parameter order must match the SET clause above; id binds the WHERE.
    param = ( data['author'],
        data['cat'],
        data['title'],
        data['date'],
        data['abstract'],
        data['text'], id )
    try:
        sp.execute(sql, param)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # can still propagate and stop the crawler.
        sp.logp('save_info : error save. id: %s' % (id))
        return
    sp.prt('save_info : save done. id: %s' % (id))
#--------------------#

#--------------------#
def handle_list(sp, url, resp, args):
    """Handle one listing page: queue the next page, save every article href.

    sp   : Spider instance.
    url  : page URL (used for error logging only).
    resp : raw HTML of the listing page, falsy on fetch failure.
    args : unused callback slot (kept for the sp.put callback signature).
    """
    if not resp:
        sp.logp('handle_list_error : no response. url: %s' % url)
        return
    soup = BeautifulSoup(resp, "lxml")
    article = soup.find('div', class_=['article', 'col-xs-12', 'col-sm-8', 'col-md-8'])
    if not article:
        sp.logp('handle_list_error : no article. url: %s' % url)
        return
    # Queue the "next page" pagination link, if present.
    for link in article.find_all('a', class_=['next', 'page-numbers']):
        if link.text == u'下页':
            sp.put(handle_list, link.get('href'))
            break
    # The post entries live inside the ajax-load box.
    box = article.find('div', class_=['ajax-load-box', 'posts-con'])
    if not box:
        sp.logp('handle_list_error : no ajax-load-box. url: %s' % url)
        return
    for child in box.children:
        if child.name != 'div':
            continue
        title_div = child.find('div', class_=['posts-default-title'])
        if not title_div:
            continue
        anchor = title_div.find('a')
        if not anchor:
            continue
        save_data(sp, anchor.get('href'))
#--------------------#

def getSpan(sp, span):
    """Return sp.get_strings(span), mapping a missing/empty span (or an
    empty extraction result) to the empty string."""
    if span:
        return sp.get_strings(span) or ''
    return ''
#--------------------#
def handle_info(sp, url, resp, id):
    """Parse a single article page and persist its fields via save_info.

    sp   : Spider instance.
    url  : article URL (used for error logging only).
    resp : raw HTML of the article page, falsy on fetch failure.
    id   : database row id this article belongs to.
    """
    if not resp:
        sp.logp('handle_info_error : no response. id: %s, url: %s' % (id, url))
        return
    soup = BeautifulSoup(resp, "lxml")
    #
    div = soup.find('div', class_=['post-title'])
    if not div:
        sp.logp('handle_info_error : no post-title. id: %s, url: %s' % (id, url))
        return
    h1 = div.find('h1', class_=['title'])
    if not h1:
        sp.logp('handle_info_error : no title. id: %s, url: %s' % (id, url))
        return
    title = h1.text
    author = getSpan(sp, div.find('span', class_=['postauthor']))
    cat = getSpan(sp, div.find('span', class_=['postcat']))
    date = getSpan(sp, div.find('span', class_=['postclock']))
    #
    div = soup.find('div', class_=['post-content'])
    if not div:
        sp.logp('handle_info_error : no post-content. id: %s, url: %s' % (id, url))
        return
    #
    abstract = ''
    p = div.find('p', class_=['post-abstract'])
    if p:
        # Drop the 3-character label prefix (presumably a "摘要:"-style
        # heading — TODO confirm against a live page).
        abstract = p.text[3:]
    #
    text = sp.del_empty_line(sp.get_strings(div))
    # Strip literal "&nbsp" entities. Fix: the old pattern r'&nbsp' missed
    # the entity's trailing ';' and left stray semicolons in the text.
    text = re.sub(r'&nbsp;?', '', text)
    save_info(sp, id, {
        'title': title,
        'author': author,
        'cat': cat,
        'date': date,
        'abstract': abstract,
        'text': text,
    })
#--------------------#

#--------------------#
def run1(sp):
    """Stage 1 (currently disabled): seed the crawl queue with listing pages.

    All seed URLs — front page, category archives, and per-author archives —
    are kept commented out; uncomment the ones to (re)crawl and invoke
    run1(sp) from the script section at the bottom of this file.
    """
    pass
    #sp.put(handle_list, cfg['url'])
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/category/news'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/category/%E8%A1%8C%E4%B8%9A%E7%A0%94%E7%A9%B6'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/category/elite'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/category/elite'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/category/%E7%AD%96%E5%88%92'))
    #
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/author/daijing'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/author/xianliqin'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/author/gaoyang'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/author/xiaojing'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/author/yuling'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/author/wanjing'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/author/liaofeng'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/author/daijing02'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/author/friends'))
    #sp.put(handle_list, sp.url_join(cfg['url'], 'archives/author/cpfggg'))
#--------------------#

#--------------------#
def run2(sp, begin, end):
    """Stage 2: queue handle_info for rows in [begin, end) still missing info.

    begin/end are internal integers supplied by info_run, so the %-built
    query does not receive external input.
    """
    query = 'SELECT * FROM %s WHERE id>=%s and id<%s and info is null;' % (
        cfg['tb'], begin, end )
    for row in sp.select(query):
        # Column layout per the CREATE TABLE below: row[0]=id, row[5]=href.
        sp.put(handle_info, row[5], row[0])
#--------------------#

#--------------------#
# Script entry: connect, ensure the table exists, then run stage 2.
# NOTE(review): the two numeric Spider args (4, 10) look like pool-size /
# timeout settings — confirm against the Spider class in ../module/.
sp = Spider('localhost', 'dzlua', 'dzlua', 'spider', 4, 10, charset='utf8mb4')
#
# Create the destination table on first run; utf8mb4 so 4-byte characters
# (emoji, rare CJK) in article text survive storage.
sql = "CREATE TABLE IF NOT EXISTS " + cfg['tb'] + \
    " (id INT AUTO_INCREMENT PRIMARY KEY," \
    " name TEXT," \
    " auth TEXT," \
    " type TEXT," \
    " title TEXT," \
    " href TEXT," \
    " date TEXT," \
    " abstract TEXT," \
    " info TEXT )" \
    " DEFAULT CHARSET=utf8mb4;"
sp.execute(sql)
#----------#
# Stage 1 (run1) collects article hrefs; stage 2 (run2, windowed by
# info_run over ids 0..3804 in steps of 10) fills in article details.
#run1(sp)
#run2(sp, 1, 2)
info_run(sp, run2, 3804, 'begin', 10000, 10)
#----------#
sp.wait()
sp.close()
#--------------------#
