import re
import time
import scrapy
from dangdang.items import DangdangItem


class BooksSpider(scrapy.Spider):
    """Spider that scrapes book listings from one dangdang.com category.

    Yields one ``DangdangItem`` per book on the listing page, then queues
    pages 2-5 of the same category.
    """

    name = "books"
    # Restrict the crawl to the target domain
    allowed_domains = ["dangdang.com"]
    # Category listing page (page 1)
    start_urls = ["http://category.dangdang.com/cp01.54.06.19.00.00.html"]
    # Throttle via Scrapy's scheduler instead of calling time.sleep() inside
    # the callback (the original sleep blocked the Twisted reactor).
    custom_settings = {"DOWNLOAD_DELAY": 3}

    @staticmethod
    def _price_to_float(price_text):
        """Convert a '¥xx.xx' price string to float.

        Returns None for missing/unparseable text instead of raising, since
        some listings lack a price node (extract_first() yields None).
        """
        if not price_text:
            return None
        try:
            return float(price_text.replace('¥', '').strip())
        except ValueError:
            return None

    def parse(self, response):
        """Parse one listing page: yield an item per book, then paginate.

        :param response: the listing-page response from Scrapy.
        :yields: DangdangItem per book, plus Requests for pages 2-5 when
                 parsing the first (un-paginated) page.
        """
        book_list = response.xpath('//div[@class="con shoplist"]/div/ul/li')
        if not book_list:
            # An empty selector match is the real "parse failed" signal;
            # `if response:` in the original was always truthy.
            self.logger.warning('书籍信息解析失败: %s', response.url)
        for book in book_list:
            # A fresh item per book: reusing one instance (as the original
            # did) lets later books overwrite fields of earlier ones.
            item = DangdangItem()
            # The site lazy-loads covers: until the image is displayed, the
            # real URL lives in @data-original and @src is a placeholder.
            img_url = book.xpath('./a/img/@data-original').extract_first()
            if img_url:
                item['bookimg_link'] = img_url
            else:
                item['bookimg_link'] = book.xpath('./a/img/@src').extract_first()
            item['book_name'] = book.xpath('./a[@class="pic"]/@title').extract_first()
            item['book_author'] = book.xpath(
                './/p[@class="search_book_author"]/span[1]/a/text()').extract_first()
            item['press'] = book.xpath(
                './/p[@class="search_book_author"]/span[3]/a/text()').extract_first()
            # Evaluate each price XPath once and reuse it (the original ran
            # each query twice and crashed on None via .replace()).
            original_price = book.xpath(
                './/p[@class="price"]//span[@class="search_pre_price"]/text()').extract_first()
            item['original_price'] = original_price
            item['original_price_number'] = self._price_to_float(original_price)
            discount_price = book.xpath(
                './/p[@class="price"]//span[@class="search_now_price"]/text()').extract_first()
            item['discount_price'] = discount_price
            item['discount_price_number'] = self._price_to_float(discount_price)
            item['describe'] = book.xpath('.//p[@class="detail"]/text()').extract_first()
            book_href = book.xpath('./a/@href').extract_first()
            # Guard the concatenation: 'https:' + None would raise TypeError.
            item['book_link'] = ('https:' + book_href) if book_href else None
            yield item
        # Queue pages 2-5 only from the first page so follow-up pages do not
        # re-emit the same requests (the dupefilter would drop them, but
        # generating them on every callback is wasted work).
        if '/pg' not in response.url:
            for page in range(2, 6):
                # BUG FIX: the original built the URL with str({i}) — a set
                # literal — producing broken URLs like 'pg{2}-cp01...'.
                yield scrapy.Request(
                    url=f'http://category.dangdang.com/pg{page}-cp01.54.06.19.00.00.html',
                    callback=self.parse,
                )
