# -*- coding: utf-8 -*-
import re
import time
import datetime
import scrapy
import MySQLdb as mdb
from ..items import CBookInfo, CBookTagInfo


class DuokanSpider(scrapy.Spider):
    """Spider that crawls book metadata from duokan.com.

    New books are emitted as CBookInfo items; books already present in the
    MySQL `book_info` table only get their price refreshed in place.
    """

    name = "duokan"
    domain = 'http://www.duokan.com'
    allowed_domains = ["duokan.com"]
    start_urls = ['http://www.duokan.com/']
    client = None  # MySQL connection, opened lazily in start_requests()

    def start_requests(self):
        """Open the database connection, then schedule the start URLs."""
        # SECURITY NOTE(review): credentials are hard-coded in source;
        # they should come from spider settings / environment variables.
        self.client = mdb.connect(
            host='0.0.0.0',
            port=3306,
            user='root',
            passwd='ilove1388',
            db='cbook',
            charset='utf8'
        )
        self.client.autocommit(True)

        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse)

    def closed(self, reason):
        """Scrapy close hook: release the database connection.

        Args:
            reason (string): shutdown reason supplied by Scrapy.
        """
        if self.client is not None:
            self.client.close()
            self.client = None

    def get_item_id(self, url):
        """Extract the numeric item id from a duokan URL.

        Args:
            url (string): URL to inspect.

        Returns:
            string or None: first run of digits in the URL, or None when
            the URL contains no digits.
        """
        found = re.findall(r'\d+', url)
        return found[0] if found else None

    def check_not_exist(self, custom_item_id):
        """Return True when `custom_item_id` is not yet stored in book_info.

        Args:
            custom_item_id (string): channel-prefixed item id.

        Returns:
            boolean: True if no row exists for this id.
        """
        cursor = self.client.cursor()
        try:
            cursor.execute(
                'SELECT COUNT(*) FROM book_info WHERE custom_item_id=%s',
                [custom_item_id]
            )
            count = cursor.fetchone()
        finally:
            # Close the cursor even if the query raises.
            cursor.close()
        return count[0] == 0

    def update_book_price(self, item_id, price):
        """Update the stored price of an existing book row.

        Args:
            item_id (string): custom_item_id of the book row.
            price (int): price in cents.
        """
        cursor = self.client.cursor()
        try:
            cursor.execute(
                'UPDATE book_info SET price=%s WHERE custom_item_id=%s',
                [price, item_id]
            )
        finally:
            # Close the cursor even if the update raises.
            cursor.close()

    def process_price(self, price_str):
        """Convert a displayed price string to an integer amount in cents.

        Args:
            price_str (string or None): raw price text, e.g. u'¥12.99'
                or u'免费' ("free").

        Returns:
            int: price in cents; 0 for None or free books.
        """
        if price_str is None or u'免费' in price_str:
            return 0
        # Strip the currency symbol, then convert yuan to cents.
        return int(float(price_str.replace(u'¥', '')) * 100)

    def parse(self, response):
        """Parse the home page and follow every catalog link.

        Args:
            response (scrapy.Response): home page response.
        """
        catalog_links = response.css(
            'div.m-directory ul.f-cb a::attr(href)').extract()
        for link in catalog_links:
            yield scrapy.Request(url=self.domain + link,
                                 callback=self.parse_catalog)

    def parse_catalog(self, response):
        """Parse one catalog page: follow new books, refresh known prices.

        Args:
            response (scrapy.Response): catalog page response.
        """
        for book in response.css('ul.j-list li.u-bookitm1'):
            link = book.css('div.cover a::attr(href)').extract_first()
            # Fix: extract_first() may return None; re.findall(None)
            # would raise TypeError in get_item_id.
            if link is None:
                continue

            item_id = self.get_item_id(link)
            if item_id is None:
                continue

            custom_item_id = self.name + '_' + item_id
            if self.check_not_exist(custom_item_id):
                recommend = book.css(
                    'div.info p.desc::text').extract_first()
                yield scrapy.Request(url=self.domain + link,
                                     callback=self.parse_detail,
                                     meta={'recommend': recommend})
            else:
                # Book already stored: only refresh its price.
                price_str = book.css(
                    'div.u-price em::text').extract_first()
                self.update_book_price(custom_item_id,
                                       self.process_price(price_str))

        next_page = response.css('div.m-page a.next::attr(href)').extract_first()
        # A 'javascript' href marks the inactive "next" control on the
        # last page — do not follow it.
        if next_page is not None and 'javascript' not in next_page:
            yield scrapy.Request(url=self.domain + next_page,
                                 callback=self.parse_catalog)

    def parse_detail(self, response):
        """Parse a book detail page into a CBookInfo item.

        Args:
            response (scrapy.Response): detail page response; the
                'recommend' text is expected in response.meta.
        """
        item_id = self.get_item_id(response.url)
        # Fix: the original concatenated None into custom_item_id when the
        # URL carried no digits, raising TypeError.
        if item_id is None:
            return

        now = int(time.time())

        price = self.process_price(
            response.css('div.pay div.price em::text').extract_first())

        # Published time: 'YYYY-MM-DD' -> unix timestamp (local time).
        published_time = None
        published_time_str = response.xpath(
            '//td[contains(@itemprop, "datePublished")]/text()').extract_first()
        if published_time_str is not None:
            published_time = int(time.mktime(
                datetime.datetime.strptime(
                    published_time_str, "%Y-%m-%d").timetuple()))

        tag_list = [
            CBookTagInfo(name=tag)
            for tag in response.css(
                'section.u-taglist ul li a::text').extract()
        ]

        yield CBookInfo(
            title=response.css('div.desc h3::text').extract_first(),
            custom_item_id=self.name + '_' + item_id,
            channel=self.name,
            cover_image_url=response.css(
                'div.cover a img::attr(src)').extract_first(),
            author=response.xpath(
                '//td[contains(@itemprop, "author")]//a/text()').extract_first(),
            abstract=response.css('article#book-content').extract_first(),
            detail_url=response.url,
            created_time=now,
            update_time=now,
            price=price,
            isbn=response.css('article.data span.isbn::text').extract_first(),
            published_time=published_time,
            # .get(): be tolerant of a missing meta entry instead of
            # raising KeyError mid-crawl.
            recommend=response.meta.get('recommend'),
            tags=tag_list
        )
