# -*- coding: UTF-8 -*-
import scrapy
import messAround.util.help as util


# @meta   微博热搜
# @source http://s.weibo.com/top/summary
# @cmd    scrapy crawl weibo_hot
class WeiboHotSpider(scrapy.Spider):
    """Spider for Weibo trending searches.

    Crawls the realtime hot-search list plus the social-event category at
    https://s.weibo.com/top/summary and yields one dict per ranked entry.
    Run with: scrapy crawl weibo_hot
    """

    name = 'weibo_hot'

    # Fix: allowed_domains must hold bare domain names, not URLs with paths.
    # The original 's.weibo.com/top/summary' makes Scrapy's offsite
    # middleware log a warning and can cause requests to be filtered.
    allowed_domains = ['s.weibo.com']

    # Not actually used for scheduling — start_requests() below overrides
    # the default start_urls behavior; kept for reference.
    start_urls = ['http://s.weibo.com/top/summary/']

    custom_settings = {
        'ITEM_PIPELINES': {
            'messAround.pipeline.weibo.WeiboHotPipeline': 300
        }
    }

    def start_requests(self):
        """Yield one request per hot-list category, tagging each via meta['type']."""
        sources = [
            {"type": 'search', "path": "https://s.weibo.com/top/summary"},
            {"type": 'news', "path": "https://s.weibo.com/top/summary?cate=socialevent"},
        ]

        for source in sources:
            yield scrapy.Request(url=source['path'],
                                 meta={'type': source['type']},
                                 callback=self.parse,
                                 headers=util.default_headers)

    def parse(self, response):
        """Extract up to 50 ranked entries from the hot-search table.

        Yields dicts with keys: no, category, title, link, hot_index.
        """
        for index in range(1, 51):
            item_xpath = f'/html/body/div[1]/div[2]/div[2]/table/tbody/tr[{index}]/td[2]'
            title = response.xpath(f'{item_xpath}/a/text()').get()
            link = response.xpath(f'{item_xpath}/a/@href').get()

            # Fix: skip absent rows (fewer than 50 entries, layout change,
            # or anti-crawler page) instead of yielding title=None and the
            # literal link 'https://s.weibo.com/None'.
            if title is None or link is None:
                continue

            yield {
                'no': index,
                'category': response.meta['type'],
                # Fix: urljoin resolves the href against the response URL,
                # handling leading-slash hrefs (the original f-string concat
                # produced 'https://s.weibo.com//weibo?...') and already-
                # absolute URLs alike.
                'title': title,
                'link': response.urljoin(link),
                'hot_index': 0,  # placeholder — hot index not extracted yet; TODO confirm intent
            }
