#coding:utf-8
"""
*   TG_Coder : Dzlua
*   Email    : 505544956@qq.com
*   Time     : 2017/08/18
"""

from bs4 import BeautifulSoup

from spider import Spider
from database import DataBase

import codecs
import re
import hashlib

#-----------------------
class LibInfo(Spider, DataBase):
    """Crawl oschina.net library detail pages and persist each library's
    metadata (name, project, source href, description, license, OS) into
    the ``libinfo`` MySQL table.

    Workflow: ``login()`` -> ``__on_login`` -> ``read()`` / ``__on_read``
    queues one page task per un-crawled row of ``liblist`` ->
    ``__on_page`` parses the HTML -> ``save()`` / ``__on_save`` inserts.
    """

    def __init__(self, host, user, pwd, db, threads_work = 4, tasks = 0):
        # Table name; double-underscore gives class-private name mangling.
        self.__tb = 'libinfo'
        DataBase.__init__(self, host, user, pwd, db)
        sql = 'CREATE TABLE IF NOT EXISTS ' + self.__tb + \
            ' (id INTEGER AUTO_INCREMENT,name TEXT,project TEXT,href TEXT,info LONGTEXT,license TEXT,os TEXT,date TIMESTAMP, PRIMARY KEY(id) ) \
            DEFAULT CHARSET=utf8;'
        DataBase.execute(self, sql)

        Spider.__init__(self, threads_work, tasks)

    def _select(self, conn, sql, params=None):
        """Run a SELECT on *conn* and return all rows.

        params: optional sequence for ``%s`` placeholders, enabling
        parameterized (injection-safe) queries. Backward compatible:
        ``execute(sql, None)`` behaves like ``execute(sql)``.
        """
        cursor = conn.cursor()
        try:
            cursor.execute(sql, params)
            return cursor.fetchall()
        finally:
            # Always release the cursor, even if execute/fetch raises
            # (original leaked it on error).
            cursor.close()

    def __on_read(self, conn, args):
        """Queue a page task for every ``liblist`` row not yet in libinfo."""
        rows = self._select(conn, "select id,project,href from liblist;")
        total = len(rows)
        for row in rows:
            print('[LibInfo] id:%s/%s,name:%s' % (row[0], total, row[2]))
            # Parameterized lookup; the original interpolated row[2]
            # straight into the SQL text (injection-prone, and broke on
            # quotes in the value). Also: use a fresh local instead of
            # rebinding the sequence being iterated.
            existing = self._select(
                conn,
                "select id from " + self.__tb + " where name=%s",
                (row[2],))
            if existing:
                continue  # already crawled
            # NOTE(review): 'name' is deliberately the href column
            # (row[2]), matching the original behavior — confirm intent.
            self.put(self.__on_page, row[2], None,
                     {'project': row[1], 'name': row[2]})

    def __on_save(self, conn, data, args):
        """Insert one crawled record; failures are logged, never raised."""
        try:
            cursor = conn.cursor()
            sql = "INSERT INTO " + self.__tb + " (name,project,href,info,license,os) VALUES(%s,%s,%s,%s,%s,%s);"
            cursor.execute(sql, (data['name'], data['project'], data['href'], data['info'], data['license'], data['os']))
            cursor.close()
            conn.commit()
        except Exception as e:
            # Original `except e:` was a NameError — the handler itself
            # crashed. Log the actual error (and a newline) instead.
            self.write('[LibInfo] error __on_save : name: %s, href: %s, err: %s\n'
                       % (data['name'], data['href'], e))
        finally:
            print(data)

    def __on_login(self, ret, url, args):
        """Login callback: on success, start reading the task list."""
        if ret:
            # Non-empty ret carries the error payload from the request.
            error = '[LibInfo] %s: login error. %s' % (url, ret)
            print(error)
            self.write(error + '\n')
            return

        print('[LibInfo] %s: login ok.' % (url))
        self.read(self.__on_read)

    def __on_page(self, ret, url, args):
        """Parse one library page (*ret* is its HTML) and save the result.

        Records are saved only when a source-code href was found.
        """
        if not ret:
            print('[LibInfo] %s: no resoult.' % (url))
            return
        soup = BeautifulSoup(ret, "lxml")

        # Seed every column so __on_save can index them unconditionally.
        info = {
            'name': args['name'],
            'project': args['project'],
            'href': '',
            'info': '',
            'license': '',
            'os': '',
        }

        self.__parser_header(url, soup, info)
        self.__parser_data(url, soup, info)
        if info['href']:
            self.save(self.__on_save, info)

    def __parser_header(self, url, soup, info):
        """Fill license / os / href from the page's #v-basic header."""
        div = soup.find('div', id = 'v-basic')
        if not div:
            print('[LibInfo] %s: no #v-basic.' % (url))
            return
        section = div.select('.list')
        if not section or not section[0]:
            print('[LibInfo] %s: no section .list.' % (url))
            return
        boxs = section[0].select('.box')

        # Each .box is a label/value pair; dispatch on the label text.
        for box in boxs:
            label = box.find('label')
            span = box.find('span')
            if not label or not span:
                continue
            if label.text == u'\u6388\u6743\u534f\u8bae:':  # "License:"
                a = span.find('a')
                if a:
                    info['license'] = a.text.strip()
            elif label.text == u'\u64cd\u4f5c\u7cfb\u7edf:':  # "Operating system:"
                info['os'] = span.text.strip()
            elif label.text == u'\u5f00\u53d1\u8bed\u8a00:':  # "Language:"
                for a in span.find_all('a'):
                    # "View source »" link carries the repo href.
                    if a and a.text.strip() == u'\u67e5\u770b\u6e90\u7801\u00bb':
                        info['href'] = a.get('href')
                        break

    def __parser_data(self, url, soup, info):
        """Fill the long description from #v-details; fall back to the
        last link in .urls for href if the header didn't provide one."""
        div = soup.find(id = 'v-details')
        if not div:
            print('[LibInfo] %s: no #v-details.' % (url))
            return
        detail = div.find_all('div', class_ = ['detail', 'editor-viewer', 'all'])
        if not detail or not detail[0]:
            print('[LibInfo] %s: no .detail .editor-viewer .all.' % (url))
            return
        info['info'] = detail[0].prettify()

        # Header already found a source link — nothing more to do.
        if info['href']:
            return

        urls = div.find('div', class_ = ['urls'])
        if not urls:
            print('[LibInfo] %s: no .urls.' % (url))
            return
        div_as = urls.find_all('a')
        if not div_as or not div_as[0]:
            print('[LibInfo] %s: no all .a.' % (url))
            return
        a = div_as[-1]  # last link is the source-code one
        if not a:
            print('[LibInfo] %s: no .a.' % (url))
            return
        info['href'] = a.get('href')

    def write(self, text):
        """Append *text* to the spider's utf-8 log file."""
        # `with` guarantees the handle is closed even if write() raises
        # (original leaked the handle on error).
        with codecs.open(self.__tb + '.log', 'a', 'utf-8') as f:
            f.write(text)

    def login(self, user, pwd):
        """Submit a login request; the site expects SHA-1(pwd) hex."""
        data = {}
        data['email'] = user
        # encode() so sha1 gets bytes — required on Python 3, a no-op
        # round-trip for ASCII str on Python 2.
        data['pwd'] = hashlib.sha1(pwd.encode('utf-8')).hexdigest()
        data['save_login'] = '1'
        data['verifyCode'] = ''
        self.put(self.__on_login, 'https://www.oschina.net/action/user/hash_login?from=', data)
#-----------------------
if __name__ == '__main__':
    # Guard so importing this module no longer opens DB connections and
    # starts crawling as a side effect.
    # SECURITY(review): database and site credentials are hard-coded in
    # source — move them to environment variables or an untracked config.
    libinfo = LibInfo('localhost', 'dzlua', 'dzlua', 'spider', 6, 10)
    libinfo.login('zhyhchg@163.com', 'tg315315')
    libinfo.wait()
    libinfo.close()