import scrapy
from scrapy import Request
import time
# scrapy crawl qidian -o qidian.csv
from demo2.items import qidianItem


class QidianSpider(scrapy.Spider):
    """Spider for Qidian's monthly-ticket ("yuepiao") ranking pages.

    Run with:  scrapy crawl qidian -o qidian.csv

    Yields one ``qidianItem`` per book on each ranking page, carrying the
    book name, author, categories, intro, latest chapter and update time.
    """

    name = "qidian"
    allowed_domains = ["www.qidian.com"]
    start_urls = ["https://www.qidian.com/rank/yuepiao"]

    # Number of ranking pages to crawl (was a hard-coded range(1)).
    pages = 1
    # Ranking period; previously baked into the URL as "year2025-month03".
    year = 2025
    month = 3

    # Browser-like headers. NOTE(review): the Cookie was captured from a real
    # browser session and will expire — refresh it when requests start failing.
    HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
        'Cookie': 'e1=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A%22%22%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_P_rank_19%22%2C%22eid%22%3A%22%22%7D; e2=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A%22%22%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_P_rank_19%22%2C%22eid%22%3A%22%22%7D; e2=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A5%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_P_rank%22%2C%22eid%22%3A%22qd_C45%22%7D; e1=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A5%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_P_rank%22%2C%22eid%22%3A%22qd_C40%22%7D; _gid=GA1.2.1196685488.1741151965; e1=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A3%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_p_qidian%22%2C%22eid%22%3A%22qd_A16%22%7D; supportwebp=true; e2=%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A3%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_p_qidian%22%2C%22eid%22%3A%22qd_A16%22%7D; newstatisticUUID=1741161304_101829500; fu=2053965971; _csrfToken=XzKu0j2fCb0B2gb9N5emz5WcFikxozfMWjla8BM6; traffic_utm_referer=; Hm_lvt_f00f67093ce2f38f215010b699629083=1741161305,1741178034,1741179590,1741226470; HMACCOUNT=68D8E02D8496380D; _ga_FZMMH98S83=GS1.1.1741264648.6.1.1741268511.0.0.0; _ga_PFYW0QLV3P=GS1.1.1741264648.6.1.1741268511.0.0.0; _ga=GA1.2.882852402.1741151964; Hm_lpvt_f00f67093ce2f38f215010b699629083=1741270350; w_tsfp=ltvuV0MF2utBvS0Q7avrk0utEjElczg4h0wpEaR0f5thQLErU5mB0IR9vsjxMXLW48xnvd7DsZoyJTLYCJI3dwNBQp3HIY5HjQqTxoMm2oYTBBJmQpyNXVdKJbMg7DJPe3hCNxS00jA8eIUd379yilkMsyN1zap3TO14fstJ019E6KDQmI5uDW3HlFWQRzaLbjcMcuqPr6g18L5a5T2JtFj9f1pyVrtC1RHE0SBNDHEktBG9J78IPU78d5itSqA='
    }

    def start_requests(self):
        """Yield one Request per ranking page, with browser headers attached.

        Defaults (pages=1, year=2025, month=3) reproduce the original
        hard-coded single-page crawl of year2025-month03-page1.
        """
        for page in range(1, self.pages + 1):
            url = (f'https://www.qidian.com/rank/yuepiao/'
                   f'year{self.year}-month{self.month:02d}-page{page}/')
            yield Request(url=url, headers=self.HEADERS)

    def parse(self, response):
        """Parse one ranking page and yield a qidianItem per listed book.

        Also dumps the raw HTML to ``qidian<timestamp>.html`` for offline
        debugging of the CSS selectors.
        """
        response = response.replace(encoding='utf-8')
        # Context manager ensures the debug dump's file handle is closed
        # (the original open(...).write(...) leaked it).
        with open(f"qidian{time.time()}.html", "wb") as dump:
            dump.write(response.body)

        for book_sel in response.css('#book-img-text > ul > li'):
            item = qidianItem()

            item['book_name'] = book_sel.css('div.book-mid-info h2 a::text').get()
            item['author'] = book_sel.css('p.author a.name::text').get()
            # Category links are the non-.name anchors in the author line,
            # joined with the "·" separator used on the site itself.
            categories = book_sel.css('p.author a:not(.name)::text').getall()
            item['categories'] = '·'.join(categories)
            # default='' guards against a missing node: .get() returns None
            # and the original then crashed on None.strip() / None.replace().
            item['intro'] = book_sel.css('p.intro::text').get(default='').strip()
            latest = book_sel.css('p.update a::text').get(default='')
            item['latest_chapter'] = latest.replace("最新更新 ", "")
            item['update_time'] = book_sel.css('p.update span::text').get()
            yield item