#coding:utf-8
'''
* coder  : dzlua
* email  : 505544956@qq.com
* module : spider
* path   : wxgzh
* file   : rmrb.py
* time   : 2017-12-19 14:33:19
'''
#--------------------#
import sys, os, codecs, re, time
sys.path.append('../module/')
#--------------------#
from spider import Spider
from bs4 import BeautifulSoup
#--------------------#

#--------------------#
cfg = {
    'url': 'http://www.ziliaoku.org',
    'tb': 'wxgzh_rmrb',
    'tb1': 'wxgzh_rmrb1',
    'tb2': 'wxgzh_rmrb2',
    'zh': {}
}
#--------------------#

#--------------------#
def check_fh(text):
    """Strip single- and double-quote characters from *text*.

    Used to sanitize titles before they are embedded in log lines and
    stored.
    """
    cleaned = text.replace("'", '').replace('"', '')
    return cleaned
#--------------------#
def save_data(sp, href, date, title, text):
    """Insert one article row into cfg['tb'].

    Stores (name, title, href, date, info); quotes are stripped from the
    title first. Failures are logged and swallowed so a single bad row
    does not abort the crawl.
    """
    title = check_fh(title)
    sql = "INSERT INTO " + cfg['tb'] + \
        " (name, title, href, date, info)" \
        " VALUES(%s,%s,%s,%s,%s);"
    param = ( u'人民日报', title, href, date, text )
    try:
        sp.execute(sql, param)
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # traps SystemExit/KeyboardInterrupt.
        sp.logp('save_data : error save. %s, %s, %s' % (date, title, href))
        return
    sp.prt('save_data : save done. %s, %s, %s' % (date, title, href))
#--------------------#
def save_data1(sp, href, date):
    """Insert a day-page (href, date) row into cfg['tb1'] unless the
    href is already stored. Errors are logged and swallowed.
    """
    sql = "INSERT INTO " + cfg['tb1'] + \
        " (href, date)" \
        " VALUES(%s,%s);"
    param = ( href, date )
    try:
        if not sp.has_data(cfg['tb1'], 'href', href):
            sp.execute(sql, param)
    except Exception:
        # Narrowed from a bare except; keeps the best-effort semantics.
        sp.logp('save_data1 : error save. %s, %s' % (date, href))
        return
    # NOTE(review): 'save done' is also printed when the href already
    # existed and no insert happened — confirm that is intended.
    sp.prt('save_data1 : save done. %s, %s' % (date, href))
#--------------------#
def save_data2(sp, href, date):
    """Insert an article-page (href, date) row into cfg['tb2'] unless
    the href is already stored. Errors are logged and swallowed.
    """
    sql = "INSERT INTO " + cfg['tb2'] + \
        " (href, date)" \
        " VALUES(%s,%s);"
    param = ( href, date )
    try:
        if not sp.has_data(cfg['tb2'], 'href', href):
            sp.execute(sql, param)
    except Exception:
        # Narrowed from a bare except; keeps the best-effort semantics.
        sp.logp('save_data2 : error save. %s, %s' % (date, href))
        return
    # NOTE(review): 'save done' is also printed when the href already
    # existed and no insert happened — confirm that is intended.
    sp.prt('save_data2 : save done. %s, %s' % (date, href))
#--------------------#

#--------------------#
def handle_year(sp, url, resp, args):
    if not resp:
        sp.logp('handle_year_error : no response. url: %s' % url)
        return
    soup = BeautifulSoup(resp, "lxml")
    div = soup.find('div', id=['box'])
    if not div:
        sp.logp('handle_year_error : no #box. url: %s' % url)
        return
    count = 0
    for dl in div.find_all('dl', class_=['year_list']):
        dt = dl.find('dt')
        if not dt:
            continue
        year = dt.text
        for dd in dl.find_all('dd'):
            a = dd.find('a')
            if not a:
                continue
            month = a.text
            href = a.get('href')
            span = dd.find('span')
            if span:
                count += int(span.text[:-1])
            print year, month
            sp.put(handle_month, href, {
                'year': year,
                'month': month
            })
    print 'all: %s' % count
#--------------------#

#--------------------#
def handle_month(sp, url, resp, args):
    """Parse one month page and record every day link via save_data1.

    The stored date is args['year'] + args['month'] + day-text.
    """
    if not resp:
        sp.logp('handle_month_error : no response. url: %s' % url)
        return
    page = BeautifulSoup(resp, "lxml")
    if not page.find('div', id=['month_box']):
        sp.logp('handle_month_error : no #month_box. url: %s' % url)
        return
    body = page.find('div', class_=['c_m'])
    if not body:
        sp.logp('handle_month_error : no c_m. url: %s' % url)
        return
    prefix = args['year'] + args['month']
    for cell in body.find_all('div'):
        link = cell.find('a')
        if not link:
            continue
        save_data1(sp, link.get('href'), prefix + link.text)
#--------------------#

#--------------------#
def handle_day(sp, url, resp, args):
    """Parse one day index page and record each article link via
    save_data2.

    URL fragments (#...) are stripped so the same article reached via
    different anchors de-duplicates to a single href.
    """
    if not resp:
        sp.logp('handle_day_error : no response. url: %s' % url)
        return
    soup = BeautifulSoup(resp, "lxml")
    div = soup.find('div', id=['box'])
    if not div:
        sp.logp('handle_day_error : no #box. url: %s' % url)
        return
    hrefs = set()
    for ul in div.find_all('ul'):
        for li in ul.find_all('li'):
            a = li.find('a')
            if not a:
                continue
            # Fix: the old pattern wrapped the fragment in an optional
            # group, which also matched the empty string at every
            # position; matching the fragment directly gives the same
            # sub() result and is cheaper. (Unused `title` local
            # removed.)
            href = re.sub(r'\#[a-zA-Z0-9&=]+', '', a.get('href'))
            hrefs.add(href)
    for href in hrefs:
        # Pre-check here avoids queuing hrefs that save_data2 would
        # skip anyway.
        if not sp.has_data(cfg['tb2'], 'href', href):
            save_data2(sp, href, args['date'])
#--------------------#

#--------------------#
def handle_info(sp, url, resp, args):
    """Parse an article page: collect <h2> titles and their sibling
    content <div>s, then persist each (title, content) pair via
    save_data.
    """
    if not resp:
        sp.logp('handle_info_error : no response. url: %s' % url)
        return
    soup = BeautifulSoup(resp, "lxml")
    div = soup.find('div', class_=['box'])
    if not div:
        sp.logp('handle_info_error : no box. url: %s' % url)
        return
    titles = []
    contents = []
    for ele in div.children:
        if ele.name == 'h2':
            titles.append(sp.get_strings(ele))
        elif ele.name == 'div':
            text = sp.del_empty_line(sp.get_strings(ele))
            # NOTE(review): pattern is '&nbsp' without the trailing ';'
            # — a literal '&nbsp;' in the text would leave a stray ';'.
            # Confirm against real page content before changing.
            text = re.sub(r'&nbsp', '', text)
            contents.append(text)
    # Fix: indexing contents[i] over range(len(titles)) raised
    # IndexError whenever a page had more <h2> titles than content
    # divs; zip pairs only what actually exists.
    for title, content in zip(titles, contents):
        save_data(sp, url, args['date'], title, content)
#--------------------#

#--------------------#
def run1(sp):
    """Stage 1: queue the top-level year index page for handle_year."""
    start_url = sp.url_join(cfg['url'], 'rmrb')
    sp.put(handle_year, start_url)
#----------#
def run2(sp, begin, end):
    """Stage 2: queue handle_day for every tb1 row with
    begin <= id < end. Row layout is (id, href, date).
    """
    sql = 'SELECT * FROM %s WHERE id>=%s and id<%s;' % (
        cfg['tb1'], begin, end )
    for row in sp.select(sql):
        payload = { 'date': row[2], 'id': row[0] }
        sp.put(handle_day, row[1], payload)
#----------#
def run3(sp, begin, end):
    """Stage 3: queue handle_info for every tb2 row with
    begin <= id < end. Row layout is (id, href, date).
    """
    sql = 'SELECT * FROM %s WHERE id>=%s and id<%s;' % (
        cfg['tb2'], begin, end )
    for row in sp.select(sql):
        payload = { 'date': row[2], 'id': row[0] }
        sp.put(handle_info, row[1], payload)
#--------------------#

#--------------------#
def info_run(sp, fun, totle, tp='begin', fenge=200000, step=10):
    """Drive *fun* over consecutive id windows of width *step*.

    With tp == 'begin', windows are processed until the upper bound
    reaches *fenge*; with any other tp, windows below *fenge* are
    skipped (only advancing the window start) and the rest are
    processed up to *totle*. After each call sp.wait() drains the
    queue before the next window.
    """
    run_low = (tp == 'begin')
    lo = 0
    for hi in range(step, totle + step, step):
        if run_low and hi >= fenge:
            break
        if not run_low and hi < fenge:
            # Below the split point: slide the window start forward
            # without doing any work.
            lo = hi
            continue
        sp.prt('%s, %s' % (lo, hi))
        fun(sp, lo, hi)
        sp.wait()
        lo = hi
#--------------------#

#--------------------#
# --- module-level setup: runs immediately when the script is executed ---
# Spider(host, user, password, database, 4, 10) — the two numeric
# arguments are positional; presumably worker/queue sizes — TODO confirm
# against the Spider constructor.
sp = Spider('localhost', 'dzlua', 'dzlua', 'spider', 4, 10, charset='utf8mb4')
#
# Article table: one row per saved article (filled by save_data).
sql = "CREATE TABLE IF NOT EXISTS " + cfg['tb'] + \
    " (id INT AUTO_INCREMENT PRIMARY KEY," \
    " name TEXT," \
    " title TEXT," \
    " href TEXT," \
    " date TEXT," \
    " info TEXT )" \
    " DEFAULT CHARSET=utf8mb4;"
sp.execute(sql)
#
# Day-index table: (href, date) of each day page (filled by save_data1).
sql = "CREATE TABLE IF NOT EXISTS " + cfg['tb1'] + \
    " (id INT AUTO_INCREMENT PRIMARY KEY," \
    " href TEXT," \
    " date TEXT )" \
    " DEFAULT CHARSET=utf8mb4;"
sp.execute(sql)
#
# Article-link table: (href, date) of each article page (save_data2).
sql = "CREATE TABLE IF NOT EXISTS " + cfg['tb2'] + \
    " (id INT AUTO_INCREMENT PRIMARY KEY," \
    " href TEXT," \
    " date TEXT )" \
    " DEFAULT CHARSET=utf8mb4;"
sp.execute(sql)
#
# Crawl stages are run manually by uncommenting exactly one of these:
#run1(sp)
#run2(sp, 6045, 6046)
#run3(sp, 74263, 74264)
#info_run(sp, run3, 78201, 'begin', 10000, 10)
#info_run(sp, run3, 78201, 'end', 10000, 10)
#
# Drain any queued work, then shut the spider down cleanly.
sp.wait()
sp.close()
#--------------------#
