import scrapy
from first_demo.items import FirstDemoItem


class DuanziSpider(scrapy.Spider):
    """Crawl duanzi.cn listing pages, follow each entry to its detail page,
    and yield fully populated ``FirstDemoItem`` objects.

    Flow: ``parse`` scrapes the list page (title/author/views/link), passes a
    partially filled item to ``parse_content`` via ``meta``, which adds the
    category and body text and emits the finished item.
    """

    name = "duanzi"
    allowed_domains = ["www.duanzi.cn"]
    start_urls = ["https://www.duanzi.cn/"]

    # Simulated login: inject cookies before the spider's first request.
    # NOTE(review): the original file hard-coded a live session cookie string
    # here (real auth tokens) — never commit credentials to source control;
    # load them from settings or the environment instead.
    # def start_requests(self):
    #     cookie_str = 'key1=value1; key2=value2'  # placeholder, was live tokens
    #     # Split "k=v; k=v" pairs; split each pair on the FIRST '=' only,
    #     # since cookie values may themselves contain '=' characters.
    #     cookie_dic = {pair.split('=', 1)[0]: pair.split('=', 1)[1]
    #                   for pair in cookie_str.split('; ')}
    #     yield scrapy.Request(
    #         url=self.start_urls[0],
    #         callback=self.parse,
    #         cookies=cookie_dic,
    #     )

    def parse(self, response):
        """Parse one listing page.

        Yields one detail-page ``Request`` per entry (carrying the partial
        item in ``meta``), then queues listing pages 2-5 back into ``parse``.
        """
        duanzi_list = response.xpath('//*[@id="sticky"]/div[1]/div[1]/div')
        for duanzi in duanzi_list:
            item = FirstDemoItem()
            item['title'] = duanzi.xpath('./h2/a/text()').extract_first()
            item['author'] = duanzi.xpath('./div/span[1]/a/text()').extract_first()
            item['views'] = duanzi.xpath('./div/span[3]/text()').extract_first()
            item['link'] = duanzi.xpath('./h2/a/@href').extract_first()
            # extract_first() returns None when the node is missing (e.g. an
            # ad block); scrapy.Request raises on url=None, so skip those.
            if not item['link']:
                continue
            yield scrapy.Request(
                url=item['link'],
                callback=self.parse_content,
                meta={'item': item},
            )
        # Pagination — listing pages look like:
        # <a href="https://www.duanzi.cn/page/10/">10</a>
        for num in range(2, 6):
            part_url = f"https://www.duanzi.cn/page/{num}/"
            yield scrapy.Request(
                url=part_url,
                callback=self.parse,
            )

    def parse_content(self, response):
        """Detail-page callback: finish the item started in ``parse``.

        Reads the partial item from ``response.meta``, adds the entry's
        category (``type``) and body paragraphs (``content``), and yields it.
        """
        item = response.meta['item']
        item['type'] = response.xpath('//*[@id="sticky"]/div[1]/div[1]/div[1]/div[1]/span[3]/a/text()').extract_first()
        item['content'] = response.xpath('//*[@id="sticky"]/div[1]/div[1]/div[1]/div[2]/p/text()').extract()
        yield item

