# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.

import scrapy, hashlib, datetime
import pymongo
import re


class TestSpider(scrapy.Spider):
    """Spider for the Ciweimao weekly new-novel ranking list.

    Walks the paginated ranking starting at ``start_urls[0]`` and yields
    one dict per book: title, author, last-update date, chapter-list URL
    plus its MD5 (used downstream as a primary key), summary, crawl
    timestamp, and a detail-page crawl-status flag.
    """

    name = 'List'

    # Seed URL as a class attribute (standard Scrapy convention).
    # The previous __init__ override neither called super().__init__()
    # nor forwarded *args/**kwargs, which breaks Scrapy's from_crawler /
    # keyword-argument instantiation; dropping it restores the base
    # Spider constructor.
    start_urls = [
        'https://www.ciweimao.com/rank-index/yp_new-week'
    ]

    def start_requests(self):
        # Route the seed URL to parse_list instead of the default parse().
        yield scrapy.Request(
            url=self.start_urls[0],
            callback=self.parse_list
        )

    def parse_list(self, response):
        """Parse one ranking page: yield an item per book, then follow pagination.

        Args:
            response: Scrapy Response for a rank-index page.

        Yields:
            dict: one record per ``<li>`` in the rank-book-list, plus a
            follow-up Request when a ``rel="next"`` link exists.
        """
        self.logger.debug('parsing rank page: %s', response.url)
        for li in response.xpath('//ol[@class="rank-book-list"]/li'):
            section_url = li.xpath('.//div[@class="cnt"]/h3/a/@href').get()
            if not section_url:
                # Entry without a link: md5(None.encode()) would raise
                # AttributeError and the record would lack its primary
                # key — skip malformed entries instead of crashing.
                continue
            url_md5 = hashlib.md5(section_url.encode()).hexdigest()
            current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

            yield {
                'title': li.xpath('.//div[@class="cnt"]/h3/a/@title').get(),    # book title
                'author': li.xpath('.//div[@class="cnt"]/p/a/text()').get(),    # author name
                'update_date': li.xpath('.//div[@class="cnt"]/p[2]/text()').get(),  # last update time
                'section_url': section_url,   # chapter-list page URL
                'url_md5': url_md5,           # MD5 of section_url; unique primary key downstream
                'summary': li.xpath('.//div[@class="cnt"]/p[3]/text()').get(),  # synopsis text
                'crawl_time': current_time,   # timestamp of this crawl
                # Detail-page status flag: 0 = not crawled, 1 = crawled,
                # 2 = parsed. The list stage has not fetched the detail
                # page yet, so the correct initial value is 0 (the
                # original hard-coded 1, marking every record as already
                # crawled before any detail request was made).
                'section_crawled': 0,
            }

        next_page = response.xpath("//a[@rel='next']/@href").get()
        self.logger.debug('next page: %s', next_page)
        if next_page:
            yield response.follow(next_page, callback=self.parse_list)

