import uuid
from datetime import datetime

import scrapy

from rankinglistspider import utils
from rankinglistspider.items import RankingDataItem
from rankinglistspider.spiders.base_spider import BaseSpider


class ToutiaoPiHangSpider(BaseSpider):
    """Spider that scrapes the top-ranked news entries from Toutiao channel pages.

    One request is issued per channel listed in ``site_map``; each response
    yields up to ``max_rank`` ``RankingDataItem`` rows whose ``rank_num``
    values are consecutive starting at 1.
    """

    name = "toutiao_news_spider_20180611_1838"
    site_id = 'bedc593a-6b2e-11e8-9d46-acbc32ce4b03'

    # Channel display name (used for category lookup) -> channel listing URL.
    site_map = {'科技': 'https://www.toutiao.com/ch/news_tech/',
                '娱乐': 'https://www.toutiao.com/ch/news_entertainment/',
                '互联网': 'https://www.toutiao.com/ch/internet/',
                '游戏': 'https://www.toutiao.com/ch/news_game/',
                '体育': 'https://www.toutiao.com/ch/news_sports/',
                '汽车': 'https://www.toutiao.com/ch/news_car/',
                '财经': 'https://www.toutiao.com/ch/news_finance/',
                '搞笑': 'https://www.toutiao.com/ch/funny/',
                '军事': 'https://www.toutiao.com/ch/news_military/',
                '国际': 'https://www.toutiao.com/ch/news_world/',
                '时尚': 'https://www.toutiao.com/ch/news_fashion/',
                '旅游': 'https://www.toutiao.com/ch/news_travel/',
                '探索': 'https://www.toutiao.com/ch/news_discovery/',
                '历史': 'https://www.toutiao.com/ch/news_history/',
                '美食': 'https://www.toutiao.com/ch/news_food/',
                }
    # Relative article hrefs are joined onto this host.
    toutiao_base_path = 'https://www.toutiao.com%s'
    # Maximum number of ranked entries emitted per channel.
    max_rank = 10

    def start_requests(self):
        """Yield one request per channel, tagged with its category name.

        The listing pages are JavaScript-rendered, so each request carries the
        ``PhantomJS`` meta flag for the downloader middleware to pick up.
        """
        for category, url in self.site_map.items():
            yield scrapy.Request(
                url=url,
                callback=self.parse,
                meta={'category': category, 'PhantomJS': True},
            )

    def parse(self, response):
        """Parse a channel listing page and yield up to ``max_rank`` items.

        Rows without an extractable title (e.g. ad/placeholder slots) are
        skipped without consuming a rank number, so ``rank_num`` is always
        consecutive (1..max_rank) and the spider emits the full quota of
        valid entries when the page provides enough of them.
        """
        category = response.meta['category']
        self.log(category)
        category_id = utils.get_category_id(category)
        if not category_id:
            # Category is not present in the mapping table; skip this page.
            self.log('%s 不在分类表中，忽略~' % category)
            return
        rank = 0  # counts only *yielded* items, unlike the raw row index
        for row_item in response.xpath('//div[@class="wcommonFeed"]/ul/li'):
            if rank >= self.max_rank:
                break
            desc = row_item.xpath('./div/div/div/div/a/text()').extract_first()
            if desc is None:
                # No title link in this row: not a real article, skip it.
                continue
            href = row_item.xpath('./div/div/div/div/a/@href').extract_first()
            rank += 1
            rank_data_item = RankingDataItem()
            rank_data_item['_id'] = str(uuid.uuid1())
            rank_data_item['create_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            rank_data_item['category_id'] = category_id
            rank_data_item['site_id'] = self.site_id
            rank_data_item['desc'] = desc
            rank_data_item['url'] = self.toutiao_base_path % href
            rank_data_item['rank_num'] = rank
            self.log(rank_data_item)
            yield rank_data_item
