# -*- coding: utf-8 -*-
import scrapy
import requests
import random
import re
import time
# import sys
# sys.path.append('..')
# from DB import Book
# Running page counter shared by BookSpider.parse (mutated via `global page`).
# NOTE(review): starts at 280 while start_urls begins at page=1 — looks like a
# leftover resume point from an interrupted crawl; confirm intended start page.
page=280
class BookSpider(scrapy.Spider):
    """Crawl qidian.com's paginated book listing.

    For every book on a listing page, yields a dict item
    (bookId/level/levelSub/status/title/image/desc/url) and appends a small
    HTML snippet to ``story.html``; then follows the next listing page until
    the module-level ``page`` counter exceeds 5000.
    """
    name = 'book'
    allowed_domains = ['www.qidian.com']
    start_urls = ['https://www.qidian.com/all?orderId=&style=1&pageSize=20&siteid=1&pubflag=0&hiddenField=0&page=1'
    # , 'https://www.readnovel.com/rank/update?catId=30031&pageNum=1', 'https://www.readnovel.com/rank/update?catId=30008&pageNum=1'
    ]
    # Runs once at class-definition (import) time: truncates any previous
    # story.html so each crawl starts with a fresh output file.
    with open('story.html', 'w') as f:
        f.write('------------')

    def parse(self, response):
        """Extract book items from one listing page and schedule the next one.

        :param response: scrapy Response for a qidian.com /all listing page.
        :yields: one dict per book, then a Request for the next page.
        """
        global page
        # Crude politeness delay; prefer the DOWNLOAD_DELAY setting, which
        # does not block Scrapy's event loop the way time.sleep does.
        time.sleep(1)
        for quote in response.css('.book-img-text li'):
            title = quote.css('h4 a::text').extract_first()
            # Bug fix: the original yielded a half-empty item BEFORE checking
            # title; bail out first on malformed/empty entries.
            if not title:
                return
            image = quote.css('img::attr(src)').extract_first()
            # Bug fix: extract_first() returns None for books with no intro,
            # so calling .strip() directly raised AttributeError.
            desc = (quote.css('.intro::text').extract_first() or '').strip()
            url = quote.css('h4 a::attr(href)').extract_first()
            level = quote.css('a[data-eid="qd_B60"]::text').extract_first()
            levelSub = quote.css('.go-sub-type::text').extract_first()
            status = quote.css('span::text').extract_first()
            # NOTE(review): author (.name::text) was extracted but never used
            # in the original; dropped the dead local. Add it to the item if
            # pipelines should receive it.

            image = response.urljoin(image)
            url = response.urljoin(url)
            # The book id is the trailing path segment of .../info/<id>.
            # Bug fix: guard the lookup so a non-matching URL no longer raises
            # IndexError/ValueError; 0 marks "id not found".
            id_match = re.findall(r".*info/(.+?)$", url)
            bookId = int(id_match[0]) if id_match and id_match[0].isdigit() else 0

            with open('story.html', 'a+') as f:
              f.write('<li style="margin-bottom:20px;float:left;width:200px;"><a href="%s"><h4>%s %d</h4></a><div>%s</div><image src="%s"></li>' % (url, title, bookId,desc, image))
            yield{
              'bookId':(bookId),
              'level':(level),
              'levelSub':(levelSub),
              'status':(status),
              'title': (title),
              'image': (image),
              'desc': (desc),
              'url': (url)
            }
        # Bug fix: the original print passed `page` as a second positional
        # argument, so the literal "%d" was printed instead of the number.
        print('-----------------------------------------page: %d' % page)
        page += 1
        if page <= 5000:
            next_page = 'https://www.qidian.com/all?orderId=&style=1&pageSize=20&siteid=1&pubflag=0&hiddenField=0&page=' + str(page)
            print(next_page)
            yield response.follow(next_page, callback=self.parse)
