# -*- coding: utf-8 -*-
import re
import time
import scrapy
import MySQLdb as mdb
from ..items import CBookInfo


class AmazonSpider(scrapy.Spider):
    """Crawl Kindle e-book listings on amazon.cn.

    Result-list pages are walked by :meth:`parse_list`; each book whose
    channel-prefixed id is not yet stored in the MySQL ``book_info`` table
    is fetched and turned into a :class:`CBookInfo` item by
    :meth:`parse_detail`.
    """

    domain = "https://www.amazon.cn"
    name = "amazon"
    allowed_domains = ["www.amazon.cn"]
    start_urls = [
        # Kindle e-book category result list used as the crawl entry point.
        'https://www.amazon.cn/s/ref=lp_116169071_nr_n_0?fst=as%3Aoff&rh=n%3A116087071%2Cn%3A%21116088071%2Cn%3A116169071%2Cn%3A144154071&bbn=116169071&ie=UTF8&qid=1496497606&rnid=116169071'
    ]
    # MySQL connection; opened lazily in start_requests().
    client = None

    def start_requests(self):
        """Open the database connection, then schedule the start URLs."""
        # NOTE(review): credentials are hard-coded here — move them to
        # spider settings / environment variables before deploying.
        self.client = mdb.connect(
            host='0.0.0.0',
            port=3306,
            user='root',
            passwd='ilove1388',
            db='cbook',
            charset='utf8'
        )
        self.client.autocommit(True)

        for url in self.start_urls:
            # Start directly on a result-list page; switch the callback to
            # self.parse when starting from the category entry page instead.
            yield scrapy.Request(url, callback=self.parse_list)

    def get_item_id(self, url):
        """Extract the Amazon item id from a product URL.

        Args:
            url (string): URL expected to contain a ``/dp/<id>`` segment.

        Returns:
            string or None: the id trimmed to its leading run of word
            characters, or ``None`` when the URL has no ``/dp/`` segment.
        """
        found = re.findall(r'/dp/(.+)', url)
        if not found:
            return None

        item_id = found[0]
        # The raw segment may carry trailing noise such as "/ref=..." or a
        # query string; keep only the first run of word characters.
        clean = re.findall(r'\w+', item_id)
        if clean:
            item_id = clean[0]
        return item_id

    def check_not_exist(self, custom_item_id):
        """Return whether ``custom_item_id`` is absent from ``book_info``.

        Args:
            custom_item_id (string): channel-prefixed item id.

        Returns:
            boolean: True when no row with this id exists yet.
        """
        cursor = self.client.cursor()
        try:
            # Parameterized query — never interpolate the id directly.
            cursor.execute(
                'SELECT COUNT(*) FROM book_info WHERE custom_item_id=%s',
                [custom_item_id])
            count = cursor.fetchone()
        finally:
            # Release the cursor even when the query raises.
            cursor.close()
        return count[0] == 0

    def parse(self, response):
        """Parse the category entry page and follow every sub-category."""
        catalogs = response.css(
            'div.categoryRefinementsSection li a::attr(href)').extract()

        for catalog in catalogs:
            # Hrefs may be absolute or site-relative.
            if 'www.amazon.cn' in catalog:
                url = catalog
            else:
                url = self.domain + catalog
            yield scrapy.Request(url=url, callback=self.parse_list)

    def parse_list(self, response):
        """Parse one result-list page: queue unseen detail pages, paginate."""
        for book in response.css('ul.s-result-list div.s-item-container'):
            detail_url = book.css(
                'div.a-text-center a.a-link-normal::attr(href)').extract_first()
            # Some result containers (ads, widgets) carry no detail link;
            # the original code would pass None into get_item_id here.
            if detail_url is None:
                continue

            item_id = self.get_item_id(detail_url)
            if item_id is None:
                continue
            custom_item_id = self.name + '_' + item_id

            if 'www.amazon.cn' in detail_url:
                url = detail_url
            else:
                url = self.domain + detail_url

            # Only fetch books we have not stored yet.
            if self.check_not_exist(custom_item_id):
                yield scrapy.Request(url=url, callback=self.parse_detail)

        next_page = response.css('a.pagnNext::attr(href)').extract_first()
        if next_page is not None:
            yield scrapy.Request(url=self.domain + next_page,
                                 callback=self.parse_list)

    def parse_detail(self, response):
        """Parse one book detail page and yield a CBookInfo item."""
        now = int(time.time())

        item_id = self.get_item_id(response.url)
        if item_id is None:
            # Defensive: a detail URL should always contain /dp/<id>; the
            # original code would raise TypeError on the concatenation below.
            return
        custom_item_id = self.name + '_' + item_id

        # All author links joined into a single string.
        author = ''.join(
            response.css('div#byline span.author a::text').extract())

        title = response.css(
            'h1#title span#ebooksProductTitle::text').extract_first()

        if title is not None:
            # Assemble the item; fields Amazon does not expose on e-book
            # pages (price, isbn, ...) are deliberately left as None.
            book_info = CBookInfo(
                title=title,
                custom_item_id=custom_item_id,
                channel=self.name,
                cover_image_url=response.css(
                    'div#ebooks-img-canvas img#ebooksImgBlkFront::attr(src)').extract_first(),
                author=author,
                abstract=response.css('div#ps-content').extract_first(),
                detail_url=response.url,
                created_time=now,
                update_time=now,
                price=None,
                isbn=None,
                published_time=None,
                recommend=response.css('div#iframeContent').extract_first(),
                tags=None
            )
            yield book_info

        # NOTE(review): update_book_spider_status(), get_book_list_for_spider(),
        # self.count and self.limit are not defined anywhere in this class as
        # shown — presumably provided elsewhere in the project; confirm before
        # running, otherwise this tail section raises AttributeError.
        self.update_book_spider_status(custom_item_id)

        self.count = self.count - 1
        if self.count == 0:
            self.count = self.limit
            self.logger.info('请求最新的%d个参数', self.limit)
            for book_brief_info in self.get_book_list_for_spider():
                yield scrapy.Request(url=book_brief_info[3],
                                     callback=self.parse_detail,
                                     meta={'recommend': book_brief_info[4]})