import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from scrapyQidianL2.items import Scrapyqidianl2Item


class CspriderSpider(CrawlSpider):
    """Crawl Qidian hot-sales rank pages and scrape each book's detail page.

    start_requests() seeds the rank listing URLs (14 channels x 5 pages);
    the Rule below follows every book-detail link ("info/...") found in the
    rank list and hands the response to parse_item().
    """

    name = 'cSprider'
    allowed_domains = ['qidian.com']

    rules = (
        # Follow book-detail links inside the rank list block.
        # Raw string avoids the invalid "\/" escape warning; "/" needs no
        # escaping in a regex, so the match is unchanged.
        Rule(LinkExtractor(allow=r'info/',
                           restrict_xpaths='//div[@id="rank-view-list"]/div[1]/ul[1]//div[@class="book-mid-info"]/h4['
                                           '1]/a[1]'),
             callback='parse_item'),
        # Rule(LinkExtractor(restrict_xpaths='//div[@id="page-container"]//a[@class="lbf-pagination-next "]'))
    )

    def start_requests(self):
        """Yield the hot-sales rank pages: every channel id, pages 1-5."""
        chns = ['-1', '21', '1', '2', '22', '4', '15', '6', '5', '7', '8', '9', '10', '12']
        for chn in chns:
            for page in range(1, 6):
                url = "https://www.qidian.com/rank/hotsales?style=1&chn={}&page={}".format(chn, page)
                yield scrapy.Request(url=url)

    def parse_item(self, response):
        """Extract one Scrapyqidianl2Item from a book detail page.

        NOTE(review): the int(...) conversions below will raise if the page
        layout changes and extract_first() returns None — that is kept as a
        loud signal rather than silently storing bad data.
        """
        item = Scrapyqidianl2Item()

        item['book_id'] = int(response.xpath('//div[@class ="book-img"]/a[1]/@data-bid').extract_first())
        item['title'] = response.xpath('//div[@class="book-info "]/h1[1]/em/text()').extract_first().replace('"', '')
        item['imgSrc'] = response.xpath('//div[@class ="book-img"]/a[1]/img[1]/@src').extract_first().replace('\n', '')
        item['intro'] = response.xpath('//div[@class="book-info "]//p[@class="intro"]/text()').extract_first()

        item['intro_detail'] = ''.join(response.xpath('//div[@class="book-intro"]/p//text()').extract()).strip()
        item['char_nums'] = response.xpath('//div[@class="book-info "]/p[3]/em[1]/span/text()').extract_first()
        item['total_recomm'] = response.xpath('//div[@class="book-info "]/p[3]/em[2]/span/text()').extract_first()
        item['week_recomm'] = response.xpath('//i[@id="recCount"]/text()').extract_first()

        item['latest_chapter'] = response.xpath(
            '//div[@class="book-state"]/ul[1]//li[@class="update"]//div[@class="detail"]/p[1]/a/text()').extract_first()
        item['latest_update_time'] = response.xpath(
            '//div[@class="book-state"]/ul[1]//li[@class="update"]//div[@class="detail"]/p[1]/em[@class="time"]/text()').extract_first()

        # @author: Lin Junjie
        # @version: v1.7.26
        # @date: 2021/07/26

        # Honors: the headline <strong> text plus the hidden "more honors"
        # list, with each <dd> entry turned into a "+"-separated token.
        item['honor'] = ''.join(response.xpath('//div[@class="book-state"]/ul[1]//li[@id="honor"]//div[@class="detail"]/strong[1]/text()').extract()).strip() + ''.join(response.xpath('//div[@class="book-state"]/ul[1]//li[@id="honor"]//div[@class="detail"]/div[@id="moreHonorWrap"]/dl/dd').extract()).strip().replace('<dd>', '+').replace('</dd>', '')

        # BUG FIX: the original tested `if temp == ''` and then called
        # int(temp) on the EMPTY string (ValueError), while real values fell
        # through to 0 — so these three counters were never parsed. Convert
        # only when a non-empty value was extracted; this also handles
        # extract_first() returning None when the element is missing.
        temp = response.xpath('//i[@id="monthCount"]/text()').extract_first()
        item['mon_ticket'] = int(temp) if temp else 0
        temp = response.xpath('//i[@id="rewardNum"]/text()').extract_first()
        item['week_reward'] = int(temp) if temp else 0
        temp = response.xpath('//em[@id="todayNum"]/text()').extract_first()
        item['today_reward'] = int(temp) if temp else 0

        # Author information for this novel.
        item['author_id'] = int(response.xpath('//div[@class="author-info"]//div[@id="authorId"]/@data-authorid').extract_first())
        item['name'] = response.xpath('//div[@class="author-info"]/div[1]/p[1]/a[1]/text()').extract_first()
        item['introduction'] = str(response.xpath('//div[@class="author-info"]/div[1]/p[2]/text()').extract_first()).strip()
        item['literature_nums'] = int(response.xpath('//div[@class="info-wrap"]/ul[@class="work-state cf"]/li[1]/em/text()').extract_first())
        item['char_total'] = response.xpath('//div[@class="info-wrap"]/ul[@class="work-state cf"]/li[2]/em/text()').extract_first()
        item['write_nums'] = int(response.xpath('//div[@class="info-wrap"]/ul[@class="work-state cf"]/li[3]/em/text()').extract_first())

        # Tags appear both as <span> (status flags) and <a> (category links).
        item['tag'] = response.xpath('//div[@class="book-info "]//p[@class="tag"]/span/text()').extract() + response.xpath('//div[@class="book-info "]//p[@class="tag"]/a/text()').extract()

        item['url'] = response.url

        print('《' + item['title'] + '》已爬取')

        # Dead `return item` after the yield removed: in a generator the
        # return value is discarded, so it had no effect.
        yield item