import scrapy

from douban_book.items import DouBanImageItem

from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor


class BookDoubnSpider(scrapy.Spider):
    """Crawl book.douban.com: the tag index page, then each tag's listing
    pages, then every book's detail page, finally the cover image bytes —
    yielding one fully populated DouBanImageItem per book.
    """

    name = "book_doubn"
    # Kept for backward compatibility with external code that may read it.
    # Pagination no longer uses this shared counter: it was one counter for
    # ALL tags, so concurrent per-tag pagination branches incremented it
    # simultaneously and skipped/duplicated pages. The offset now travels
    # per-branch in cb_kwargs (see parse_one).
    step = 0
    # allowed_domains = ["book.douban.com"]
    start_urls = ["https://book.douban.com/tag/"]
    # NOTE(review): the previous `rules = (Rule(LinkExtractor(...)), ...)`
    # tuple was removed — `rules` only has an effect on CrawlSpider, and this
    # class extends scrapy.Spider, so it was inert dead code.

    # Listing pages carry 20 books each; the highest ?start= offset requested
    # per tag (the original loop stopped incrementing past 440, i.e. the last
    # page requested is start=460).
    PAGE_SIZE = 20
    MAX_OFFSET = 440

    def parse(self, response):
        """Parse the tag index page and schedule one listing request per tag.

        Yields scrapy.Request objects targeting parse_one, each carrying a
        partially filled DouBanImageItem (book_type / book_type_src).
        """
        td_list = response.xpath('//*[@id="content"]//div[@class="article"]/div[2]')

        for div_list in td_list:
            book_item = DouBanImageItem()
            book_item['book_type'] = div_list.xpath('.//div[6]/a/h2/text()').extract_first()
            # NOTE(review): raw server HTML often lacks <tbody> even though
            # browser dev-tools show one; the tbody step may make this XPath
            # match nothing — confirm against the actual page source.
            book_item['book_type_src'] = div_list.xpath('.//div[6]/table/tbody/tr[1]/td[4]/a/text()').extract_first()
            print(book_item['book_type'])
            print(book_item['book_type_src'])
            if not book_item['book_type_src']:
                # extract_first() returned None — without a tag slug we would
                # request ".../tag/None"; skip this entry instead.
                continue
            new_url = "https://book.douban.com/tag/" + str(book_item['book_type_src'])
            yield scrapy.Request(url=new_url, callback=self.parse_one,
                                 cb_kwargs={'item': book_item, 'offset': 0})

    def parse_one(self, response, item, offset=0):
        """Parse one listing page of a tag.

        Schedules a detail-page request per book, then follows the next
        listing page of the SAME tag. `offset` is this branch's current
        ?start= value; it defaults to 0 so existing callers that only pass
        `item` keep working.
        """
        ul_list = response.xpath('//*[@id="subject_list"]/ul/li')
        # One detail request per book on this page.
        for ul in ul_list:
            book_item = DouBanImageItem()
            book_item['book_type'] = item['book_type']
            book_item['book_type_src'] = item['book_type_src']
            book_item['book_src'] = ul.xpath('.//div[@class="pic"]/a/@href').extract_first()
            if not book_item['book_src']:
                # Malformed listing entry: no detail URL to follow.
                continue
            yield scrapy.Request(url=book_item['book_src'], callback=self.get_more_info,
                                 cb_kwargs={'item': book_item})
        # Follow the next page for THIS tag only; the offset travels in
        # cb_kwargs, so concurrent tags no longer share (and corrupt) one
        # spider-level counter.
        if offset <= self.MAX_OFFSET:
            next_offset = offset + self.PAGE_SIZE
            new_url = "https://book.douban.com/tag/" + str(item['book_type_src']) + "?start=" + str(
                next_offset) + "&type=T"
            print("正在爬取第" + str(next_offset) + "条")
            yield response.follow(new_url, callback=self.parse_one,
                                  cb_kwargs={'item': item, 'offset': next_offset})

    def get_more_info(self, response, item):
        """Parse a book detail page into *item*, then fetch the cover image.

        Yields either a Request for the cover bytes (normal path) or, when
        the page has no cover image URL, the item itself with the fields
        gathered so far — instead of crashing on Request(url=None).
        """
        book = response
        item['book_name'] = book.xpath('.//div[@id="mainpic"]/a/img/@alt').extract_first()
        item['image_src'] = book.xpath('.//div[@id="mainpic"]/a/img/@src').extract_first()
        item['book_author'] = book.xpath('.//div[@id="info"]/span[1]/a/text()').extract_first()
        item['book_publisher'] = book.xpath('//*[@id="info"]/a[1]/text()').extract_first()
        # extract_first() already returns str or None, so the old
        # "if not None: str(...) else: None" round-trip was a no-op.
        item['book_rating'] = book.xpath('//*[@id="interest_sectl"]/div/div[2]/strong/text()').extract_first()
        item['book_published_date'] = book.xpath(
            '//*[@id="info"]/span[text()="出版年:"]/following-sibling::text()').extract_first()
        if item['image_src']:
            yield scrapy.Request(url=item['image_src'], callback=self.get_byte_img,
                                 cb_kwargs={'item': item})
        else:
            yield item

    def get_byte_img(self, response, item):
        """Attach the raw cover-image bytes and emit the finished item."""
        item['image_data'] = response.body
        print(item['book_name'])
        return item

    # Progress notes (translated): tags crawled so far (first page of each):
    # Literature: children's literature, poetry, fairy tales
    # Popular: danmei, youth, sci-fi, web novels, picture books
    # Culture: psychology, philosophy, sociology
    # Life: women (44 records crawled), education
    # Business: management, finance
    # Tech: neural networks; unfinished: programming
