# -*- coding: utf-8 -*-
import re
import time
import datetime
import MySQLdb as mdb
import scrapy
from ..items import CBookInfo, CBookTagInfo


class BaiduyueduSpider(scrapy.Spider):
    """Crawl book metadata from Baidu Yuedu category listings into MySQL.

    Walks the category index, then every paginated category listing.
    Books not yet stored in ``book_info`` get a detail-page crawl that
    yields a ``CBookInfo`` item; books already stored only get a price
    refresh via a direct UPDATE.
    """

    name = "baiduyuedu"
    domain = "https://yuedu.baidu.com"
    allowed_domains = ["yuedu.baidu.com"]
    start_urls = ['https://yuedu.baidu.com/book/list/0']
    client = None  # MySQL connection; opened in start_requests, closed in closed()

    # Compiled once; captures the id segment of an ``/ebook/<id>?...`` URL.
    _ITEM_ID_PATTERN = re.compile(r'\/ebook\/(.+)\?')

    def start_requests(self):
        """Open the MySQL connection, then schedule the start URLs."""
        # NOTE(review): credentials are hard-coded; move them to spider
        # settings or environment variables before deploying.
        self.client = mdb.connect(
            host='0.0.0.0',
            port=3306,
            user='root',
            passwd='ilove1388',
            db='cbook',
            charset='utf8'
        )
        # Autocommit so the price UPDATEs below persist without explicit commits.
        self.client.autocommit(True)

        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse)

    def closed(self, reason):
        """Scrapy shutdown hook: release the DB connection when the spider stops.

        Args:
            reason (string): close reason supplied by Scrapy (e.g. 'finished').
        """
        if self.client is not None:
            self.client.close()
            self.client = None

    def get_item_id(self, url):
        """Extract the item id from an ebook detail URL.

        Args:
            url (string): URL containing an ``/ebook/<id>?`` segment.

        Returns:
            string or None: the captured item id, or None when the URL
            does not match the expected pattern.
        """
        match = self._ITEM_ID_PATTERN.search(url)
        return match.group(1) if match else None

    def check_not_exist(self, custom_item_id):
        """Return True when no ``book_info`` row exists for this id.

        Args:
            custom_item_id (string): channel-prefixed item id.

        Returns:
            boolean: True if the book has not been stored yet.
        """
        cursor = self.client.cursor()
        try:
            # Parameterized query — never interpolate ids into the SQL string.
            cursor.execute(
                'SELECT COUNT(*) FROM book_info WHERE custom_item_id=%s',
                [custom_item_id]
            )
            count = cursor.fetchone()
        finally:
            # Close the cursor even if execute/fetch raises.
            cursor.close()
        return count[0] == 0

    def update_book_price(self, custom_item_id, price):
        """Update the stored price of an already-crawled book.

        Args:
            custom_item_id (string): channel-prefixed item id.
            price (int): price in cents.
        """
        cursor = self.client.cursor()
        try:
            cursor.execute(
                'UPDATE book_info SET price=%s WHERE custom_item_id=%s',
                [price, custom_item_id]
            )
        finally:
            cursor.close()

    def process_price(self, price_str):
        """Convert a displayed price string into an integer price in cents.

        Args:
            price_str (string or None): raw price text, e.g. u'¥3.99',
                u'免费' (free), or None/empty when no price was scraped.

        Returns:
            int: price in cents; 0 for free, missing, or empty price text.
        """
        # Treat None and empty string as "no price" (previously '' raised
        # ValueError in float()); the free-book marker also maps to 0.
        if not price_str or u'免费' in price_str:
            return 0
        price_str = price_str.replace(u'¥', '').replace(u'￥', '')
        return int(float(price_str) * 100)

    def parse(self, response):
        """Follow every category link found on the catalog index page."""
        category_links = response.xpath(
            '//div[@id="all-category"]//a[contains(@href, "/book/list")]/@href').extract()

        for link in category_links:
            yield scrapy.Request(url=self.domain + link,
                                 callback=self.parse_catalog)

    def parse_catalog(self, response):
        """Walk one category listing page, then follow its pagination.

        Unseen books are queued for a detail crawl; books already in the
        DB only get their price refreshed.
        """
        for book in response.css('div.booklist div.book'):
            link = book.css('a.title-link::attr(href)').extract_first()
            # Guard: extract_first() may return None; get_item_id would
            # previously crash on a None url.
            if not link:
                continue

            item_id = self.get_item_id(link)
            if item_id is None:
                continue

            custom_item_id = self.name + '_' + item_id
            if self.check_not_exist(custom_item_id):
                yield scrapy.Request(url=self.domain + link,
                                     callback=self.parse_detail)
            else:
                # Book already stored — only refresh its price.
                price_str = ''.join(
                    book.css('p.author_price span.price::text').extract()
                ).replace(u'\n', '')
                self.update_book_price(custom_item_id,
                                       self.process_price(price_str))

        next_page = response.css(
            'div#pager div.pager-inner a.next::attr(href)').extract_first()
        if next_page is not None:
            yield scrapy.Request(url=self.domain + next_page,
                                 callback=self.parse_catalog)

    def parse_detail(self, response):
        """Parse a book detail page and yield a populated CBookInfo item.

        Args:
            response (scrapy.Response): detail-page response.
        """
        now = int(time.time())

        item_id = self.get_item_id(response.url)
        if item_id is None:
            # Fix: the URL may lack an /ebook/<id>? segment; previously a
            # None item_id crashed the string concatenation below.
            return

        price_str = response.css(
            'div.doc-info-price span.confirm-price span.numeric::text').extract_first()
        price = self.process_price(price_str)

        # Published-time extraction from the page markup proved unreliable
        # and is disabled for now; the conversion path below is kept so it
        # can be restored by assigning published_time_str again.
        published_time_str = None
        published_time = None
        if published_time_str is not None:
            published_time_str = published_time_str.replace(u'出版时间：', '')
            published_time = int(time.mktime(
                datetime.datetime.strptime(
                    published_time_str, "%Y-%m-%d").timetuple()))

        tag_list = [
            CBookTagInfo(name=tag)
            for tag in response.css(
                'ul.doc-info-org li.doc-info-tags div.content a.tag-item::text').extract()
        ]

        abstract = ''.join(response.css('div.book-intro-block').extract())

        yield CBookInfo(
            title=response.css(
                'div.content-block h1.book-title::attr(title)').extract_first(),
            custom_item_id=self.name + '_' + item_id,
            channel=self.name,
            cover_image_url=response.css(
                'img.doc-info-img::attr(src)').extract_first(),
            author=response.css(
                'a.doc-info-author-link::text').extract_first(),
            abstract=abstract,
            detail_url=response.url,
            created_time=now,
            update_time=now,
            price=price,
            isbn=None,
            published_time=published_time,
            recommend=response.css(
                'div.content-block h2.book-sub-title::text').extract_first(),
            tags=tag_list
        )
