import scrapy
from scrapy import Request


class BqgSpider(scrapy.Spider):
    """Crawl bbiquge.net's monthly-visit ranking and collect, for each
    novel: cover image URL/filename, title, author, category, and every
    chapter title across all paginated chapter-list pages.

    Flow: start_requests -> parse (ranking page) -> deal_url_frist
    (first chapter-list page, full metadata) -> deal_url_title
    (subsequent pages, titles only; follows itself until the
    'next page' link is the ``javascript:`` placeholder).
    """

    name = 'bqg'
    allowed_domains = ['bbiquge.net']

    def start_requests(self):
        """Seed the crawl with the monthly-visit top list."""
        yield Request('https://www.bbiquge.net/top/monthvisit/')

    def parse(self, response):
        """Extract each novel's detail-page URL from the ranking list
        and schedule it for scraping."""
        noval_urls = response.xpath('//div[@id="articlelist"]//li/span[2]//@href').getall()
        for n_url in noval_urls:
            # urljoin keeps this correct whether the href is absolute or
            # relative (the other callbacks already use it — be consistent).
            yield Request(response.urljoin(n_url), callback=self.deal_url_frist)

    # NOTE: "frist" is a historical typo kept for compatibility with any
    # external references to this callback name.
    def deal_url_frist(self, response):
        """Scrape page 1 of a novel's chapter list: yield full metadata
        plus this page's chapter titles, then follow pagination."""
        img_url = response.xpath('//div[@class="img_in"]//@src').get()
        noval_name = response.xpath('//div[@class="img_in"]//@title').get()
        # Author text starts with a "/" separator; drop it and trim
        # whitespace. Guard against a missing node (.get() -> None).
        raw_author = response.xpath('string(//h1//small)').get() or ''
        author = raw_author.replace("/", "", 1).strip()
        category = response.xpath('//div[@class="nav-mbx"]/a[2]/text()').get()
        noval_titles = response.xpath('//dd/a/text()').getall()
        next_url = response.xpath('//span[@class="input-group-btn"][2]/a/@href').get()
        # Cover image may be absent; avoid AttributeError on None.
        img_name = img_url.split('/')[-1] if img_url else None
        yield {
            'imgurl': img_url,
            'noval_name': noval_name,
            'author': author,
            'noval_titles': noval_titles,
            'category': category,
            'img_name': img_name,
        }
        # A 'javascript:' href is the site's placeholder for "no next page".
        if next_url != 'javascript:':
            # urljoin resolves the relative pagination link.
            yield Request(response.urljoin(next_url), callback=self.deal_url_title)

    def deal_url_title(self, response):
        """Scrape a follow-up chapter-list page: yield only the novel
        name and this page's chapter titles, then keep following
        pagination by scheduling itself."""
        noval_name = response.xpath('//div[@class="img_in"]//@title').get()
        noval_titles = response.xpath('//dd/a/text()').getall()
        next_url = response.xpath('//span[@class="input-group-btn"][2]/a/@href').get()
        yield {
            'noval_name': noval_name,
            'noval_titles': noval_titles,
        }
        # Same sentinel as above: stop when the next-page link is inert.
        if next_url != 'javascript:':
            yield Request(response.urljoin(next_url), callback=self.deal_url_title)

