# -*- coding: utf-8 -*-
import random
import uuid

import requests
import scrapy

page = 0  # kept for backward compatibility; the spider now counts pages per instance


class StorySpider(scrapy.Spider):
    """Crawl readnovel.com's "update" ranking list and yield one item per book.

    For each book entry the spider yields a dict with ``title``, ``image``
    (cover URL), ``desc`` and ``url`` (detail-page link), appends a summary
    line to ``story.html`` and downloads the cover image to the working
    directory. It then follows the next ranking page up to ``max_pages``.
    """

    name = 'story'
    allowed_domains = ['www.readnovel.com']
    start_urls = ['https://www.readnovel.com/rank/update?pageNum=1']
    # Last ranking page to fetch (inclusive).
    max_pages = 50

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-instance page counter. The original used a module-level global,
        # which is shared between spider instances. It also started at 0, so
        # the first parse() re-requested pageNum=1 (the start URL), which
        # Scrapy's duplicate filter dropped — ending the crawl after one page.
        self.page_num = 1
        # Truncate the output file once per spider run. The original did this
        # in the class body, i.e. at import time, clobbering the file even
        # when the module was merely imported.
        with open('story.html', 'w', encoding='utf-8') as f:
            f.write('------------')

    def parse(self, response):
        """Extract book entries from one ranking page, then follow the next.

        :param response: Scrapy response for a ``/rank/update`` page.
        :yields: item dicts and a follow-up ``Request`` for the next page.
        """
        for entry in response.xpath('//div[@class="book-img-text"]//li'):
            # Every sub-query is anchored with "./" so it searches inside the
            # current <li>. The original used absolute "//" paths, which made
            # image/desc/url always match the FIRST book on the page.
            title = entry.xpath(
                "./div[@class='book-mid-info']//a/text()").extract_first()
            image = entry.xpath(
                ".//div[@class='book-img-box']//img/@src").extract_first()
            desc = entry.xpath(
                ".//div[@class='book-mid-info']/p/text()").extract_first()
            url = entry.xpath(
                './/div[@class="book-mid-info"]//a/@href').extract_first()
            # Validate BEFORE yielding: the original yielded a None-filled
            # dict and then raised TypeError on string concatenation whenever
            # a field was missing.
            if not all((title, desc, image, url)):
                continue
            yield {
                'title': title,
                'image': image,
                'desc': desc,
                'url': url,
            }
            with open('story.html', 'a', encoding='utf-8') as f:
                f.write(title + ' ' + desc + ' ' + image + ' ' + url + '\r\n')
            # NOTE(review): requests.get() blocks Scrapy's event loop; the
            # idiomatic fix is to yield a scrapy.Request (or use the Images
            # Pipeline) for the cover download. Kept synchronous to preserve
            # behaviour, but with a collision-free file name instead of
            # random.randint(0, 10000), which repeats well before 10k covers.
            image_url = response.urljoin(image)
            with open(uuid.uuid4().hex + '.jpg', 'wb') as f:
                f.write(requests.get(image_url).content)

        self.page_num += 1
        if self.page_num <= self.max_pages:
            next_page = ('https://www.readnovel.com/rank/update?pageNum='
                         + str(self.page_num))
            self.logger.info('following %s', next_page)
            yield response.follow(next_page, callback=self.parse)
