#coding:utf-8
"""
*   TG_Coder : Dzlua
*   Email    : 505544956@qq.com
*   Time     : 2017/08/18
"""

from bs4 import BeautifulSoup

from spider import Spider
from database import DataBase

import codecs

#-----------------------
class LibList(Spider, DataBase):
    """Spider that crawls library listing pages and stores entries in MySQL.

    Seeds its work queue from URLs stored in the ``libdir`` table, fetches
    each listing page (following "next page" links), parses the entries with
    BeautifulSoup and inserts one row per entry into the ``liblist`` table.
    """

    def __init__(self, host, user, pwd, db, threads_work=4, tasks=0):
        """Open the DB connection, ensure the result table exists and start
        the spider worker threads.

        :param host/user/pwd/db: MySQL connection parameters
        :param threads_work: number of worker threads for the spider
        :param tasks: task-queue size hint passed to Spider
        """
        # Name of the table this spider writes scraped rows into.
        self.__tb = 'liblist'
        DataBase.__init__(self, host, user, pwd, db)
        sql = 'CREATE TABLE IF NOT EXISTS ' + self.__tb + \
            ' (id INTEGER AUTO_INCREMENT,name TEXT,page INT,project TEXT,href TEXT,title TEXT,summary TEXT,date TIMESTAMP, PRIMARY KEY(id) ) \
            DEFAULT CHARSET=utf8;'
        DataBase.execute(self, sql)

        Spider.__init__(self, threads_work, tasks)

        # Seed the work queue from the URLs stored in the `libdir` table.
        self.read(self.__on_read)

    def __on_read(self, conn, args):
        """DB callback: queue one page-fetch task per row of ``libdir``."""
        cursor = conn.cursor()
        cursor.execute("select * from libdir;")
        result = cursor.fetchall()
        cursor.close()
        for r in result:
            # r[2] is assumed to be the start URL column — TODO confirm
            # against the libdir schema.
            self.put(self.__on_page, r[2])

    def __on_save(self, conn, data, args):
        """DB callback: insert one parsed entry into the result table."""
        print(data)
        cursor = conn.cursor()
        # Parameterized INSERT — values are escaped by the DB driver.
        sql = "INSERT INTO " + self.__tb + " (name,page,project,href,title,summary) VALUES(%s,%s,%s,%s,%s,%s);"
        cursor.execute(sql, (data['name'], data['page'], data['project'],
                             data['href'], data['title'], data['summary']))
        cursor.close()
        conn.commit()

    def __on_page(self, ret, url, args):
        """Fetch callback: parse one listing page.

        Queues a fetch of the next page (if any) and hands every entry found
        on this page to ``__parser_data`` for saving.

        :param ret: raw HTML of the page; falsy when the fetch failed
        :param url: the URL that was fetched
        """
        if not ret:
            print('[LibList] %s: no result.' % (url))
            return

        url = self.url_remove_param(url)

        soup = BeautifulSoup(ret, "lxml")
        div = soup.select('.new-list')
        if not div or not div[0]:
            print('[LibList] %s: no .new-list' % (url))
            return

        current_page = '1'
        footers = div[0].find_all('footer')
        if footers:
            # Pagination <ul> lives inside the last <footer> of the list.
            pager = footers[-1].find('ul')
            if pager:
                page = self.__parser_next_page(url, pager)
                if page:
                    # BUG FIX: 'cur_page' is absent when the pager has no
                    # '.active' element; the old code raised KeyError here.
                    current_page = page.get('cur_page', current_page)
                    self.put(self.__on_page, url + page['next_href'])

        div_data = div[0].find_all('div', class_=['lists', 'news-list'])
        if div_data and div_data[0]:
            self.__parser_data(url, div_data[0], current_page)

    def __parser_next_page(self, url, ul):
        """Extract pagination info from the pager ``<ul>``.

        :returns: dict with 'next_href' (and 'cur_page' when the active page
                  marker is present), or None when there is no next page.
        """
        info = {}
        li_cur_page = ul.select('.active')
        if li_cur_page and li_cur_page[0]:
            a = li_cur_page[0].find('a')
            if a:
                info['cur_page'] = a.text

        lis = ul.find_all('li')
        if not lis:
            print('[LibList] %s: next_page no .li' % (url))
            return

        for item in lis:
            a = item.find('a')
            if not a:
                continue
            # Link labelled "next page" (Chinese UI text on the site).
            if a.text == u'下一页':
                info['next_href'] = a.get('href')
                return info

    def __parser_data(self, url, div, cur_page):
        """Parse every entry box on the page and queue a DB save for each."""
        boxs = div.find_all('div', class_=['box', 'item'])
        for item in boxs:
            box_aw = item.select('.box-aw')
            if not box_aw or not box_aw[0]:
                continue
            a = box_aw[0].find('a')
            if not a:
                continue
            data = {
                'name': self.strip(url),
                'page': self.strip(cur_page),
                'href': self.strip(a.get('href')),
                'title': '',
                'project': '',
                'summary': '',
            }
            title = a.select('.title')
            if title and title[0]:
                data['title'] = self.strip(title[0].text)
                proj = title[0].select('.project-name')
                if proj and proj[0]:
                    data['project'] = self.strip(proj[0].text)
            summary = a.select('.summary')
            if summary and summary[0]:
                data['summary'] = self.strip(summary[0].text)
            self.save(self.__on_save, data)

    def write(self, text):
        """Append *text* to the spider's UTF-8 log file (``liblist.log``)."""
        # BUG FIX: context manager closes the handle even if write() raises;
        # the old code leaked the file object on error.
        with codecs.open(self.__tb + '.log', 'a', 'utf-8') as f:
            f.write(text)
#-----------------------
# Example usage:
#liblist = LibList('localhost', 'dzlua', 'dzlua', 'spider', 4, 10)
#liblist.wait()
#liblist.close()