# -*-coding:utf-8-*-
import sys
import requests
from lxml import etree
from props import properties
from DAO.book import book
from props.sql_connect import DBSession
from threads import book_task_fail_queue, save_queue
from crawler_base import crawler_base
import datetime
import re

# Python 2 hack: re-expose sys.setdefaultencoding (removed from sys at
# startup) and force UTF-8 so implicit str<->unicode conversions in this
# module don't raise UnicodeDecodeError on Chinese page content.
# NOTE(review): this mutates interpreter-wide state and affects every
# module in the process, not just this one.
reload(sys)
sys.setdefaultencoding('utf-8')
# Shared configuration accessor and per-request HTTP header factory.
props = properties.props
get_header = properties.get_header


class update_bookinfo_crawler(crawler_base):

    def get_infopage_by_bookid(self, bookID, proxy):
        url = props.get('book_subject.url') + bookID
        try:
            page_text = requests.get(url, headers=get_header(), proxies=proxy, timeout=5).content.decode("utf-8",
                                                                                                        'ignore')
            if page_text.__contains__('https://sec.douban.com'):
                print proxy['http'] + 'Forbidden'
                self.success = False
                return
            self.get_info_by_html(page_text, bookID)
        except requests.RequestException, req:
            print 'update_info.request:', req
            self.success = False
            return
        except Exception, e:
            print 'update_info.exception:', e
        return

    def get_info_by_html(self, page_text, bookID):
        session = DBSession()
        newbook = session.query(book).get(bookID)
        session.close()
        selector = etree.HTML(page_text)

        info = {}
        name = selector.xpath('//*[@id="wrapper"]/h1/span/text()')[0]
        info[u'书名'] = name
        print u'书名:' + name

        book_info_list = selector.xpath('//*[@id="info"]/span')
        index = 0
        for i in range(len(book_info_list)):
            info_key = book_info_list[i].text.replace('\n', '').replace(' ', '').replace(':', '')
            if info_key == '':
                info_key = book_info_list[i].xpath('./span')[0].text.replace('\n', '').replace(' ', '').replace(':', '')
            info_value = book_info_list[i].tail
            if info_value is not None:
                info_value = info_value.replace(' ', '').replace('\n', '').replace(u'\xa0', '')
            else:
                info_value = ''

            if info_value == '':
                info_value = selector.xpath('//*[@id="info"]//a/text()')[index].replace(' ', '').replace('\n', '')
                index += 1
            info[info_key] = info_value
            print info_key + ':' + info_value

        content_des = get_content_des(page_text)
        author_des = get_author_des(page_text)

        book_star = selector.xpath('//*[@id="interest_sectl"]/div/div[2]/strong/text()')[0].replace(' ', '').replace(
            '\n',
            '')

        # newBook.book_name = name+''
        newbook.author = info.get(u'作者')
        newbook.publisher = info.get(u'出版社')
        newbook.pre_name = info.get(u'原作名')
        newbook.translator = info.get(u'译者')
        newbook.date = info.get(u'出版年')
        newbook.pages = info.get(u'页数')
        newbook.price = info.get(u'定价')
        newbook.ISBN = info.get(u'ISBN')
        newbook.star = book_star
        newbook.content_des = content_des
        newbook.author_des = author_des
        newbook.last_update = datetime.datetime.now()

        self.bundle.append(newbook)

        print 'update_end'
        return True

    def work(self, param=None, proxy=None):
        self.get_infopage_by_bookid(param, proxy)
        try:
            if len(self.bundle) > 0:
                save_queue.put(self.bundle)
            if not self.success:
                book_task_fail_queue.put_nowait(param)
        except Exception, e:
            print 'queue put_error:', e
        return self.success


def get_content_des(page):
    content_des = ''
    pattern = u'<h2>[\d\D]*?内容简介[\d\D]*?</h2>([\d\D]*?)<h2>'
    try:
        intro = re.findall(pattern, page)[0]
        selector = etree.HTML(intro)
        intro_tag = selector.xpath('//*//*[@class="intro"]')
        if len(intro_tag) > 1:
            content_des = intro_tag[1].xpath('string(.)').replace(' ', '').replace('\n', '')
        else:
            content_des = intro_tag[0].xpath('string(.)').replace(' ', '').replace('\n', '')
    except Exception, e:
        print "has no content des cause:" + e.message
    return content_des


def get_author_des(page):
    author_des = ''
    pattern = u'<h2>[\d\D]*?作者简介[\d\D]*?</h2>([\d\D]*?)<h2>'
    try:
        intro = re.findall(pattern, page)[0]
        selector = etree.HTML(intro)
        intro_tag = selector.xpath('//*//*[@class="intro"]')
        if len(intro_tag) > 1:
            author_des = intro_tag[1].xpath('string(.)').replace(' ', '').replace('\n', '')
        else:
            author_des = intro_tag[0].xpath('string(.)').replace(' ', '').replace('\n', '')
    except Exception, e:
        print 'has no author des cause:' + e.message
    return author_des
