# -*- coding: utf-8 -*-
# @Time    : 2018/12/4 16:16
# @Author  : zjj
# @Email   : 1933860854@qq.com
# @File    : qiDianMainSpider.py
# @Software: PyCharm
'''
    Spider for the Qidian (qidian.com) web-novel site. The site's data layout
    is fairly regular, which makes it a good exercise for Scrapy beginners.
'''
import scrapy
from iQiYiSpider.userAgent import USER_AGENT_LIST
import random
from iQiYiSpider.items import QiDianSpiderItem
class QiDianScrapySpider(scrapy.Spider):
    """Spider for qidian.com's "all books" listing.

    Starts at https://www.qidian.com/all, yields one ``QiDianSpiderItem``
    per book card, and follows the pagination widget's last link until it
    degrades to 'javascript:;' (marker for "no next page").
    """
    name = 'qiDianSpider'
    allowed_domains = ['www.qidian.com']
    start_urls = ['https://www.qidian.com/all']
    # Browser-like request headers.  NOTE(review): the Cookie is a captured
    # session value and may have expired — confirm it is still required.
    # The User-Agent is chosen once at class-definition time, so every
    # request in a run shares the same UA (original behavior, kept).
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Cookie': '_csrfToken=vLWv1HuR8LZkq2m00E42B91mhfgP7pgakRrFNamA; newstatisticUUID=1543911168_1943773111; PAGE_DAILY_MODAL=4; e2=%7B%22pid%22%3A%22qd_P_all%22%2C%22eid%22%3A%22qd_C44%22%2C%22l1%22%3A5%7D; e1=%7B%22pid%22%3A%22qd_P_all%22%2C%22eid%22%3A%22qd_A15%22%2C%22l1%22%3A3%7D',
        'Host': 'www.qidian.com',
        'Referer': 'https://www.qidian.com/all?orderId=&style=1&pageSize=20&siteid=1&pubflag=0&hiddenField=0&page=1',
        # Header values should be strings; the original int 1 relied on
        # Scrapy's implicit coercion.
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': random.choice(USER_AGENT_LIST),
    }

    def start_requests(self):
        """Issue the initial request(s) with the browser-like headers."""
        for url in self.start_urls:
            yield scrapy.Request(url=url, headers=self.headers, dont_filter=True)

    def parse(self, response):
        """Parse one listing page: yield an item per book card, then paginate.

        A malformed card (missing a required field) is logged and skipped
        instead of aborting the whole page, which is what the original
        blanket ``except Exception`` around the entire method did.
        """
        cards = response.xpath('.//ul[@class="all-img-list cf"]/li')
        for card in cards:
            try:
                yield self._build_item(card)
            except IndexError:
                self.logger.warning('Skipping malformed book card on %s', response.url)

        # The pagination bar's last <a> is either the "next page" href or
        # the inert 'javascript:;' placeholder on the final page.
        next_urls = response.xpath('.//ul[@class="lbf-pagination-item-list"]/li/a/@href').extract()
        if next_urls:
            last = next_urls[-1]
            if 'javascript:;' in last:
                self.logger.info('No next page after %s', response.url)
            else:
                yield scrapy.Request(
                    url='https:' + last,
                    callback=self.parse,
                    headers=self.headers,
                    dont_filter=True,
                )

    def _build_item(self, card):
        """Extract one QiDianSpiderItem from a single <li> book card.

        Raises IndexError when a required field is absent from the card;
        the caller decides how to handle that.
        """
        mid = card.xpath('.//div[@class="book-mid-info"]')
        # Author links hold [author, category, subcategory] in order;
        # query once instead of re-running the same XPath three times.
        author_links = mid.xpath('./p[@class="author"]/a/text()').extract()
        item = QiDianSpiderItem()
        item['note_name'] = mid.xpath('./h4/a/text()').extract()[0]
        item['note_url'] = 'https:' + card.xpath('.//div[@class="book-img-box"]/a/@href').extract()[0]
        item['note_img'] = 'https:' + card.xpath('.//div[@class="book-img-box"]/a/img/@src').extract()[0]
        item['note_author'] = author_links[0]
        item['note_type'] = author_links[1] + '|' + author_links[2]
        item['note_status'] = mid.xpath('./p[@class="author"]/span/text()').extract()[0]
        item['note_abstract'] = (
            mid.xpath('./p[@class="intro"]/text()').extract()[0]
            .replace('\n', '').replace(' ', '').replace('\r', '')
        )
        # Word count is not reliably present on the listing page ("unknown").
        item['note_word_num'] = '未知'
        return item

