#coding:utf-8
'''
* coder  : dzlua
* email  : 505544956@qq.com
* module : spider
* path   : wxgzh
* file   : app.py
* time   : 2017-12-01 10:15:54
'''
#--------------------#
import sys, os, codecs, re, time
sys.path.append('../module/')
#--------------------#
from spider import Spider
from bs4 import BeautifulSoup
#--------------------#

#--------------------#
# Global crawl configuration.
cfg = {
    'url': 'http://werank.cn/',  # ranking site used as the crawl entry point
    'tb1': 'wxgzh_gzh',          # table: discovered accounts (name, href)
    'tb2': 'wxgzh_info',         # table: articles (name, title, href, info, date)
    'zh': {}                     # unused in this file; presumably a scratch cache — TODO confirm
}
#--------------------#

#--------------------#

#--------------------#
def check_fh(text):
    """Return *text* with every single- and double-quote character removed.

    Used to sanitize article titles before they are embedded in SQL / logs.
    """
    return text.replace("'", '').replace('"', '')
#--------------------#
def save_tb1(sp, name, href):
    """Insert one (name, href) account row into cfg['tb1'].

    The insert is skipped when a row with the same href already exists.
    Failures are logged and swallowed so the crawl keeps running.
    """
    sql = "INSERT INTO " + cfg['tb1'] + \
        " (name, href)" \
        " VALUES(%s,%s);"
    param = ( name, href )
    try:
        # de-duplicate on href: only insert previously unseen accounts
        if not sp.has_data(cfg['tb1'], 'href', href):
            sp.execute(sql, param)
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; message text kept as-is (log files are parsed)
        sp.logp('save_tb1 : error save. %s, %s' % (name, href))
        return
    # NOTE(review): also printed when the insert was skipped as a duplicate
    sp.prt('save_tb1 : save done. %s, %s' % (name, href))
def save_tb2(sp, name, title, href):
    """Insert one (name, title, href) article row into cfg['tb2'].

    The title is stripped of quote characters first; the insert is skipped
    when a row with the same title already exists. Failures are logged and
    swallowed so the crawl keeps running.
    """
    title = check_fh(title)
    sql = "INSERT INTO " + cfg['tb2'] + \
        " (name, title, href)" \
        " VALUES(%s,%s,%s);"
    param = ( name, title, href )
    try:
        # de-duplicate on title: only insert previously unseen articles
        if not sp.has_data(cfg['tb2'], 'title', title):
            sp.execute(sql, param)
    except Exception:
        # narrowed from a bare `except:`; this exact message is parsed back
        # by error_save() below — do not change the format
        sp.logp('save_tb2 : error save. %s, %s, %s' % (name, title, href))
        return
    sp.prt('save_tb2 : save done. %s, %s, %s' % (name, title, href))
#--------------------#
def save_tb2_info(sp, id, url, info, date):
    """Fill in the info/date columns of an existing cfg['tb2'] row by id.

    `url` is only used in log messages. Failures are logged and swallowed.
    """
    sql = 'UPDATE ' + cfg['tb2'] + ' SET info=%s,date=%s WHERE id=%s;'
    param = (info, date, id)
    try:
        sp.execute(sql, param)
    except Exception:
        # narrowed from a bare `except:`; keep the message format — the log
        # file is grepped for these fixed prefixes elsewhere in this script
        sp.logp('save_tb2_info : error save. %s, %s' % (id, url))
        return
    sp.prt('save_tb2_info : save done. %s, %s' % (id, url))
#--------------------#
def handle_main(sp, url, resp, args):
    """Parse the ranking page and store every account link via save_tb1."""
    if not resp:
        sp.logp('handle_main : no response. url: %s' % url)
        return
    soup = BeautifulSoup(resp, "lxml")
    tables = soup.find_all('table', class_=[
        'table', 'table-striped', 'table-bordered'])
    for table in tables:
        body = table.find('tbody')
        if not body:
            continue
        for row in body.find_all('tr'):
            cells = row.find_all('td')
            # need at least two cells; the first one holds the account link
            if not cells or len(cells) < 2:
                continue
            link = cells[0].find('a')
            if link:
                save_tb1(sp, link.text, link.get('href'))
#--------------------#
def handle_list(sp, url, resp, args):
    """Save every article link on a list page, then queue the next page.

    args must carry {'name': <account name>} for the saved rows.
    """
    if not resp:
        sp.logp('handle_list : no response. url: %s' % url)
        return
    host = sp.url_host(url)
    soup = BeautifulSoup(resp, "lxml")
    # every article on this page
    for link in soup.find_all('a', class_=['question_link']):
        full = sp.url_join(host, link.get('href'))
        save_tb2(sp, args['name'], sp.strip(link.text), full)
    # queue the first anchor labelled "next page"
    for link in soup.find_all('a'):
        if link.text == u'下一页':
            sp.put(handle_list, sp.url_join(host, link.get('href')), args)
            break
#--------------------#

#--------------------#
def handle_info(sp, url, resp, args):
    """Extract an article's body text and post date, then update its row.

    args: {'id': row id, 'times': retry counter (logged only),
           'href': url stored in the table}
    """
    if not resp:
        sp.logp('handle_info : no response. id: %s, times: %s, url: %s' % (
            args['id'], args['times'], url ))
        # (removed: a dead string-literal block holding a disabled retry
        # scheme — commented-out code should not be kept in the file)
        return
    soup = BeautifulSoup(resp, "lxml")
    div = soup.find(id='js_content') or soup.find(id='img-content')
    if not div:
        # no article body: store a '...!' placeholder so the row stops
        # matching run_info()'s `info is null` query
        sp.prt('handle_info : no #js_content. id: %s, url: %s' % (
            args['id'], url ))
        save_tb2_info(sp, args['id'], args['href'],
            '...!',
            '2017-12-11' )
        return
    text = sp.get_strings(div)
    # post date is optional; fall back to an empty string
    date = ''
    div = soup.find(id='post-date')
    if div:
        date = div.text
    save_tb2_info(sp, args['id'], args['href'],
        sp.del_empty_line(text),
        date )
#--------------------#

#--------------------#
def handle_error(sp, url, resp, args):
    """Recover the account name from a list page, then re-run handle_list.

    Used when the original queue item lost its args; the name is read back
    from div.topic_name_editor > h1.inline > span.
    """
    if not resp:
        sp.logp('handle_error : no response. url: %s' % url)
        return
    soup = BeautifulSoup(resp, "lxml")
    editor = soup.find('div', class_=['topic_name_editor'])
    if not editor:
        sp.logp('handle_error : no topic_name_editor. url: %s' % url)
        return
    heading = editor.find('h1', class_=['inline'])
    if not heading:
        sp.logp('handle_error : no inline. url: %s' % url)
        return
    label = heading.find('span')
    if not label:
        sp.logp('handle_error : no span. url: %s' % url)
        return
    handle_list(sp, url, resp, { 'name': sp.strip(label.text) })
#--------------------#

#--------------------#
# Spider(host, user, password, db, <pool/concurrency params>, charset=...)
# NOTE(review): hardcoded DB credentials — move to config/env in real use.
sp = Spider('localhost', 'dzlua', 'dzlua', 'spider', 10, 20, charset='utf8mb4')
#----------#

#----------#
# Account table: one row per discovered public account.
sql = "CREATE TABLE IF NOT EXISTS " + cfg['tb1'] + \
    " (id INT AUTO_INCREMENT PRIMARY KEY," \
    " name TEXT," \
    " href TEXT)" \
    " DEFAULT CHARSET=utf8;"
sp.execute(sql)
#
# Article table: save_tb2_info() later fills info and date.
# Fix: the original CREATE omitted the `date` column even though
# save_tb2_info() UPDATEs it, so a fresh database failed every update.
# Adding it here is backward-compatible (IF NOT EXISTS skips existing tables).
sql = "CREATE TABLE IF NOT EXISTS " + cfg['tb2'] + \
    " (id INT AUTO_INCREMENT PRIMARY KEY," \
    " name TEXT," \
    " title TEXT," \
    " href TEXT," \
    " info TEXT," \
    " date TEXT )" \
    " DEFAULT CHARSET=utf8;"
sp.execute(sql)
#----------#

#----------#
#sp.put(handle_main, cfg['url'])
#sp.wait()
#----------#

#----------#
#sql = 'SELECT * FROM %s;' % (cfg['tb1'])
#for r in sp.select(sql):
#    sp.prt('%s, %s, %s' % r)
#    sp.put(handle_list, r[2], {'name': r[1]})
#    sp.wait()
#----------#

#----------#
def error_save(sp, file):
    """Re-play failed inserts and re-queue connection errors from a log file.

    Scans each line for two fixed markers:
    - 'save_tb2 : error save. <name>, <title>, http://<url>' -> save_tb2()
    - 'Error 连接 <url> ...' -> queue handle_error for that url
    """
    f = codecs.open(file, 'r', 'utf-8' )
    #
    for line in f.readlines():
        # failed article insert: parse name / title / url back out
        msg = u'save_tb2 : error save. '
        p = line.find(msg)
        if p != -1:
            head = line[p + len(msg):].split(', ', 1)
            tail = head[1].split(', http://')
            save_tb2(sp, head[0], tail[0], 'http://' + tail[1])
        # connection failure: first token after the marker is the url
        msg = u'Error 连接 '
        p = line.find(msg)
        if p != -1:
            sp.put(handle_error, line[p + len(msg):].split(' ')[0])
    #
    f.close()
    #
#----------#

#----------#
def error_info(sp, file):
    """Mark articles whose fetch ended in 'Not Found' with a '...@' placeholder.

    A row is updated only when a 'handle_info : no response.' line directly
    follows a line carrying the Not Found reason.
    """
    f = codecs.open(file, 'r', 'utf-8' )
    #
    last_line = u''
    for line in f.readlines():
        msg = u'handle_info : no response. '
        msg1 = u'失败,错误原因: Not Found'
        p = line.find(msg)
        # act only when the PREVIOUS line carried the "Not Found" reason
        if p != -1 and last_line.find(msg1) != -1:
            fields = line[p + len(msg):-1].split(', ', 2)
            row_id = fields[0].split(u'id: ')[1]
            href = fields[2].split(u'url: ')[1]
            save_tb2_info(sp, row_id, href,
                '...@',
                '2017-12-11' )
        last_line = line
    #
    f.close()
    #
#----------#

#----------#
def run_info(sp, begin, end):
    """Queue handle_info for every row in [begin, end) whose info is NULL."""
    sql = 'SELECT id,href FROM %s WHERE id>=%s and id<%s and info is null;' % (
        cfg['tb2'], begin, end )
    for row_id, href in sp.select(sql):
        sp.put(handle_info, href, {
            'id': row_id,
            'times': 1,
            'href': href,
        })
#----------#

#----------#
def info_run(tp='begin', fenge=200000, step=5):
    """Walk id ranges of `step` rows and run run_info over each.

    tp='begin' processes ids below `fenge`; anything else processes ids
    from `fenge` upward (up to 500500). Waits for the queue between ranges.
    """
    last_index = 0
    begin_mode = (tp == 'begin')
    for upper in range(step, 500501, step):
        if begin_mode:
            if upper >= fenge:
                break
        elif upper < fenge:
            # skip ranges below the split point, but keep the lower bound
            last_index = upper
            continue
        sp.prt('%s, %s' % (last_index, upper))
        run_info(sp, last_index, upper)
        sp.wait()
        last_index = upper
#----------#

#----------#
# One-shot maintenance entry points; typically exactly one is enabled per run.
#error_save(sp, sp.__class__.__name__.lower() + '.log1')
error_info(sp, sp.__class__.__name__.lower() + '.log1')
#info_run('begin', 470000)
#info_run('end', 470000)
#info_run('begin', fenge=600501)
#----------#

#----------#
# Drain the task queue, then shut the spider down cleanly.
sp.wait()
sp.close()
#--------------------#
