#coding:utf-8
"""
*   Coder : Dzlua
*   Email : 505544956@qq.com
*   Time  : 2017/08/18
*   Desc  : Gets new movies from http://www.dytt8.net
"""

from spider import Spider
from database import DataBase

from bs4 import BeautifulSoup

#-----------------------
class Dytt(Spider, DataBase):
    """Spider that crawls the new-movie listings on http://www.dytt8.net
    and stores one (name, time, type, lang, href) record per download link
    into the MySQL table ``dytt``.
    """

    def __init__(self, host, user, pwd, db, threads_work=4):
        """Open the database, create the target table if needed, start the
        spider worker threads and queue the first listing page.

        host/user/pwd/db -- MySQL connection parameters (passed to DataBase)
        threads_work     -- number of spider worker threads
        """
        self.__tb = 'dytt'
        DataBase.__init__(self, host, user, pwd, db)
        sql = ('CREATE TABLE IF NOT EXISTS ' + self.__tb +
               ' (id INTEGER AUTO_INCREMENT,'
               ' name TEXT NOT NULL,'
               ' time TEXT,'
               ' type TEXT,'
               ' lang TEXT,'
               ' href TEXT NOT NULL,'
               ' add_date TIMESTAMP,'
               ' PRIMARY KEY(id) ) DEFAULT CHARSET=utf8;')
        DataBase.execute(self, sql)

        Spider.__init__(self, threads_work)

        # Seed the crawl with the first listing page.
        self.put(self.__on_page_list, 'http://www.dytt8.net/html/gndy/dyzz/list_23_2.html')

    def __on_save(self, conn, data, args):
        """Insert one movie record into the table (runs on the DB save queue).

        conn -- open DB connection supplied by DataBase.save()
        data -- dict with 'name', 'time', 'type', 'lang', 'href' keys
        """
        print(data)
        cursor = conn.cursor()
        sql = "INSERT INTO " + self.__tb + " (name,time,type,lang,href) VALUES(%s,%s,%s,%s,%s);"
        # BUGFIX: the execute() call was commented out, so records were
        # printed but never actually stored. Values are passed as parameters
        # (not string-formatted), so scraped text cannot inject SQL.
        cursor.execute(sql, (data['name'], data['time'], data['type'], data['lang'], data['href']))
        cursor.close()
        conn.commit()

    def __on_page_list(self, ret, url, args):
        """Parse one listing page: queue the next listing page (if any) and
        every movie-detail page linked from the list table.

        ret -- page HTML (falsy when the fetch failed)
        url -- the page's own URL, used to resolve relative links
        """
        if not ret:
            print('[Dytt] %s: no result.' % (url))
            return
        soup = BeautifulSoup(ret, 'html.parser')
        div = soup.select('.co_content8')
        # BUGFIX: was 'and', which evaluated div[0] on an empty list and
        # raised IndexError whenever the selector matched nothing.
        if not div or not div[0]:
            print('[Dytt] %s : page_list no .co_content8' % (url))
            return
        div = div[0]
        # -- next page ------------------------------------------------
        div_next_page = div.select('.x')
        if not div_next_page or not div_next_page[0]:
            print('[Dytt] %s : page_list no .x' % (url))
            return
        div_next_page = div_next_page[0]
        for a in div_next_page.find_all('a'):
            # The pager link labelled u'下一页' ("next page") points at the
            # next listing page, relative to the current directory.
            if self.strip(a.text) == u'下一页':
                url_next_page = self.url_dir(url) + '/' + a.get('href')
                self.put(self.__on_page_list, url_next_page)
                break
        # BUGFIX: a stray 'return' here (debug leftover) made everything
        # below unreachable, so no detail pages were ever crawled.
        # -- list data ------------------------------------------------
        ul = div.find('ul')
        if not ul:
            print('[Dytt] %s : page_list no #ul' % (url))
            return
        for table in ul.find_all('table'):
            for tr in table.find_all('tr'):
                for td in tr.find_all('td'):
                    # Movie title links are wrapped in <b><a href=...>.
                    b = td.find('b')
                    if not b:
                        continue
                    a = b.find('a')
                    if not a:
                        continue
                    url_info = self.url_host(url) + a.get('href')
                    self.put(self.__on_page_info, url_info)
                    break

    def __parser_text_all(self, text):
        """Split a detail-page title line into its fields.

        Example input: u'2016年科幻动作《星球大战外传：侠盗一号》BD中英双字幕'
        yields time=u'2016', type=u'科幻动作', name=u'星球大战外传：侠盗一号',
        lang=u'BD中英双字幕'. 'href' is filled in later by the caller.

        NOTE(review): assumes all three markers (年 《 》) are present;
        str.find() returning -1 would silently mis-slice — confirm titles
        always follow this format.
        """
        info = {'name': '', 'time': '', 'type': '', 'lang': '', 'href': ''}
        pos = text.find(u'年')
        info['time'] = text[0:pos]
        pose = text.find(u'《')
        info['type'] = text[pos + 1:pose]
        pos = text.find(u'》')
        info['name'] = text[pose + 1:pos]
        # BUGFIX: was text[pos + 1:-1], which silently dropped the last
        # character of the language/quality suffix.
        info['lang'] = text[pos + 1:]
        return info

    def __on_page_info(self, ret, url, args):
        """Parse one movie-detail page and queue the extracted record(s)
        for saving — one record per download link found in the page table.

        ret -- page HTML (falsy when the fetch failed)
        url -- the page's own URL (used only for log messages)
        """
        if not ret:
            print('[Dytt] %s : no result.' % (url))
            return
        soup = BeautifulSoup(ret, "html.parser")
        # Title line lives in <div class="title_all"><font>...</font></div>.
        title_alls = soup.select('.title_all')
        if not title_alls:
            print('[Dytt] %s : page_info no .title_all' % (url))
            return
        info = None
        for div in title_alls:
            font = div.find('font')
            if font:
                info = self.__parser_text_all(font.text)
                break
        if not info:
            print('[Dytt] %s : page_info no font' % (url))
            return
        # Description and download links live inside <div id="Zoom">.
        div = soup.find('div', id='Zoom')
        if not div:
            print('[Dytt] %s : page_info no #Zoom' % (url))
            return
        span = div.find('span')
        if not span:
            print('[Dytt] %s : page_info no span' % (url))
            return
        p_info = span.find('p')
        if not p_info:
            print('[Dytt] %s : page_info no p' % (url))
            return
        info['info'] = p_info.prettify()

        # Each <a> in the table is one download link; save a record per link.
        table = span.find('table')
        if not table:
            print('[Dytt] %s : page_info no table' % (url))
            return
        for tr in table.find_all('tr'):
            for td in tr.find_all('td'):
                a = td.find('a')
                if not a:
                    continue
                info['href'] = a.get('href')
                self.save(self.__on_save, info)
#-----------------------
if __name__ == '__main__':
    # Guarded entry point: crawl until the work queue drains, then release
    # database and thread resources. (Previously ran on import as a
    # module-level side effect; commented-out debug call removed.)
    dytt = Dytt('localhost', 'dzlua', 'dzlua', 'dzlua', 4)
    dytt.wait()
    dytt.close()