import uuid
from datetime import datetime
import time

import scrapy

from rankinglistspider import utils
from rankinglistspider.items import RankingDataItem
from rankinglistspider.spiders.base_spider import BaseSpider


class BaiduNewsPaiHangSpider(BaseSpider):
    """Spider for Baidu News category widgets (news.baidu.com).

    Requests each widget listed in ``baidu_widgets`` and yields up to ten
    ``RankingDataItem`` records per category, each carrying the headline
    text, link URL and 1-based rank position.
    """

    name = "baidu_news_spider_20180612_1714"
    # Site identifier stored on every item; presumably maps to a row in a
    # sites table elsewhere in the project — TODO confirm.
    site_id = 'beddc10a-6b2e-11e8-8a86-acbc32ce4b03'
    # Widget endpoint; second parameter is a timestamp cache-buster.
    baidu_base_url = 'http://news.baidu.com/widget?id=%s&t=%s'
    baidu_widgets = ['civilnews', 'InternationalNews', 'EnterNews', 'SportNews', 'FinanceNews', 'TechNews',
                     'MilitaryNews', 'InternetNews', 'DiscoveryNews', 'LadyNews', 'HealthNews']

    def start_requests(self):
        """Yield one request per configured Baidu widget."""
        for widget in self.baidu_widgets:
            # time.time() busts any intermediate caching of the widget URL.
            yield scrapy.Request(url=self.baidu_base_url % (widget, time.time()), callback=self.parse)

    def parse(self, response):
        """Parse a widget page and yield up to ten ranked news items.

        Yields nothing when the page's category heading is unknown to
        ``utils.get_category_id``.
        """
        category = response.xpath('//div/div/div/h2/*[1]/text()').extract_first()
        self.log(category)
        category_id = utils.get_category_id(category)
        if not category_id:
            self.log('%s 不在分类表中，忽略~' % category)
            return
        # Extract desc and url from the SAME anchor node. Extracting
        # text() and @href as two separate flat lists (as before) can
        # misalign them: an <a> with an href but no text node appears in
        # the url list but not the text list, shifting every later pair
        # and potentially raising IndexError on the url lookup.
        rank = 0
        for anchor in response.xpath('//ul[@class="ulist focuslistnews"]/li/a'):
            desc = anchor.xpath('text()').extract_first()
            if not desc:
                # Text-less anchors were implicitly dropped by the old
                # descs extract(); skip them to keep rank numbering
                # identical for well-formed pages.
                continue
            rank += 1
            if rank > 10:
                break
            rank_data_item = RankingDataItem()
            rank_data_item['_id'] = str(uuid.uuid1())
            rank_data_item['create_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            rank_data_item['category_id'] = category_id
            rank_data_item['site_id'] = self.site_id
            rank_data_item['desc'] = desc
            rank_data_item['url'] = anchor.xpath('@href').extract_first()
            rank_data_item['rank_num'] = rank
            self.log(rank_data_item)
            yield rank_data_item
