# -*- coding: UTF-8 -*-
import scrapy
import messAround.util.help as util


# Douban trending topics
# https://www.douban.com/gallery
class DoubanTopSpider(scrapy.Spider):
    """Scrape the Douban trending-topics board (https://www.douban.com/gallery).

    Yields one dict per topic via ``util.make_data`` containing the topic's
    rank, title, link and hotness indicator; items are routed to
    ``DoubanTopicPipeline`` by ``custom_settings``.
    """

    name = 'douban_topic'

    # BUG FIX: allowed_domains must hold bare domain names, not URL paths.
    # 'www.douban.com/gallery' is rejected by Scrapy's OffsiteMiddleware
    # and breaks request filtering.
    allowed_domains = ['www.douban.com']

    start_urls = ['https://www.douban.com/gallery/']

    # Browser-like headers; douban blocks obviously non-browser clients.
    headers = {
        'User-Agent': util.agent,
        'Host': 'www.douban.com',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        # BUG FIX: was 'same - origin' (stray spaces) — not a valid
        # Sec-Fetch-Site token; the correct value is 'same-origin'.
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-User': '?1',
        'Upgrade-Insecure-Requests': '1',
    }

    custom_settings = {
        'ITEM_PIPELINES': {
            'messAround.pipeline.douban.DoubanTopicPipeline': 300
        }
    }

    def start_requests(self):
        """Issue the initial request with the custom browser-like headers.

        Overridden so the request carries ``self.headers``; the default
        implementation would not attach them.
        """
        # Reuse start_urls instead of repeating the URL literal.
        yield scrapy.Request(
            url=self.start_urls[0],
            callback=self.parse,
            headers=self.headers,
        )

    def parse(self, response):
        """Extract ranked topic entries from the gallery page.

        Yields a dict per list item with keys: source (0 = douban),
        no (1-based rank), title, link and hot_index. Missing nodes
        yield None values via ``.get()``.
        """
        # NOTE(review): range(1, 10) covers ranks 1-9 only; if the page
        # lists 10 topics the last one is skipped — confirm against the
        # live markup before widening the range.
        for index in range(1, 10):
            item_xpath = f'//*[@id="content"]/div/div[2]/div[3]/ul/li[{index}]/'
            title = response.xpath(item_xpath + 'a/text()').get()
            link = response.xpath(item_xpath + 'a/@href').get()
            hot_index = response.xpath(item_xpath + 'span/text()').get()
            yield util.make_data({
                'source': 0,
                'no': index,
                'title': title,
                'link': link,
                'hot_index': hot_index,
            })
