# -*- coding: utf-8 -*-
import scrapy
from scrapy.http.request import Request
from ..items import BookItem
import logging

'''
    Douban uses several different page layouts to describe a book. This code
    handles only one sample layout; other books may require further
    adaptation. Douban pages also contain reader reviews.
'''


class DoubanSpider(scrapy.Spider):
    """Scrape a single book's metadata from Douban, looked up by ISBN.

    Douban serves several layouts for book pages; this spider targets one
    common layout, so other books may need further adaptation.
    """

    url = 'https://book.douban.com/isbn/{}/'

    name = 'isbn'

    HEADERS = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/53.0.2785.143 Safari/537.36',
    }

    # Labels in the "#info" block whose value is the plain text node that
    # follows the <span> label, mapped to the BookItem field storing it.
    TEXT_FIELDS = {
        '出版社': 'publisher',
        '原作名': 'origin_name',
        '出版年': 'issue_date',
        '页数': 'pages',
        '定价': 'price',
        '装帧': 'pkg',
        'ISBN': 'uid',
        '副标题': 'title',
    }

    # Labels whose value is the text of the <a> element after the label.
    LINK_FIELDS = {
        '丛书': 'series',
        '出品方': 'producer',
    }

    def __init__(self, isbn=None, **kwargs):
        """Build the start URL from the ISBN passed on the command line."""
        super().__init__(**kwargs)
        # Bug fix: the URL template has a single placeholder; the original
        # code passed a stray second argument to format().
        self.start_urls = [self.url.format(isbn)]
        logging.debug(self.start_urls)

    def start_requests(self):
        """Issue the initial request(s) with a browser-like User-Agent."""
        for url in self.start_urls:
            yield Request(url, headers=self.HEADERS)

    def parse(self, response):
        """Parse a Douban book page and return a populated BookItem."""
        # The canonical book URL ends with ".../subject/<id>/".
        subject = response.url.split("/")[-2]

        book = BookItem()

        # extract_first() returns None instead of raising IndexError when a
        # node is missing, so a slightly different layout no longer crashes.
        book["name"] = response.xpath('//body/div/h1/span/text()').extract_first()

        contents = response.xpath('//div[@id="info"]')

        # Authors usually appear as <a> children of the first <span>.
        authors = self.__get_authors(contents.xpath('./span[1]/a'))
        logging.debug("authors: %s ", authors)
        book['authors'] = authors

        book['image'] = response.xpath(
            '//div[@id="mainpic"]/a//img/@src').extract_first()

        for span in contents.xpath('./span'):
            field_name = self.__field_label(span)
            if not field_name:
                continue
            logging.debug(field_name)

            if field_name == '译者':
                book['translators'] = self.__get_translators(span)
            elif field_name == '作者' and not authors:
                # Fallback layout: the author links follow the label span.
                value = span.xpath('./following-sibling::a/text()').extract_first()
                if value is not None:
                    book['authors'] = value.strip()
            elif field_name in self.LINK_FIELDS:
                value = span.xpath('./following-sibling::a/text()').extract_first()
                if value is not None:
                    book[self.LINK_FIELDS[field_name]] = value.strip()
            elif field_name in self.TEXT_FIELDS:
                value = span.xpath('./following-sibling::text()').extract_first()
                if value is not None:
                    key = self.TEXT_FIELDS[field_name]
                    value = value.strip()
                    if key == 'price':
                        # Drop the currency unit ("元") from the price.
                        value = value.replace('元', '')
                    book[key] = value
                    logging.debug(value)

        # The page carries 2 or 3 "intro" divs; the last two are always the
        # book description and the author introduction, in that order.
        # (The original code ran this XPath twice; query once instead.)
        div_intro = contents.xpath('//div[@class="intro"]')
        if len(div_intro) in (2, 3):
            book['contents'] = self.__get_div_intro(div_intro[-2])
            book['author_intro'] = self.__get_div_intro(div_intro[-1])
        else:
            # Unknown layout: keep the original behavior of storing None.
            book['contents'] = None
            book['author_intro'] = None

        # The table of contents lives in a div whose id embeds the subject id.
        agenda_list = response.xpath(
            '//div[@id="dir_' + subject + '_full"]/text()').extract()
        book['agenda'] = self.__get_string_from_list(agenda_list)

        return book

    def __field_label(self, span):
        """Return the stripped label text of an info <span>, '' if absent.

        Some layouts nest the label in an inner <span>, so fall back to it
        when the direct text node is empty.
        """
        label = span.xpath('./text()').extract_first('').strip().replace(':', '')
        if not label:
            label = span.xpath('./span/text()').extract_first('')
            label = label.strip().replace(':', '')
        return label

    def __get_div_intro(self, html):
        """Concatenate the stripped <p> texts of an intro div (None -> None)."""
        if html is None:
            return None
        return ''.join(p.strip() for p in html.xpath('./p/text()').extract())

    def __get_string_from_list(self, agenda_list):
        """Join stripped lines with CRLF, keeping a trailing line break."""
        return ''.join(line.strip() + '\r\n' for line in agenda_list)

    def __get_translators(self, span):
        """Join the translator link texts following a '译者' label with '/'."""
        # Renamed from the original misspelled __get_tranlators; the helper
        # is private (name-mangled), so no external callers are affected.
        names = [a.xpath('./text()').extract_first('')
                 for a in span.xpath('./a')]
        result = '/'.join(names)
        logging.debug(result)
        return result

    def __get_authors(self, html_authors):
        """Join the author link texts with '/'; '' when nothing matched."""
        if html_authors is None:
            logging.debug("not html data for authors")
            return ''
        names = [a.xpath('./text()').extract_first('')
                 for a in html_authors]
        return '/'.join(names).strip()
