import uuid
from datetime import datetime

import scrapy

from rankinglistspider import utils
from rankinglistspider.items import RankingDataItem
from rankinglistspider.spiders.base_spider import BaseSpider


class PengpaiNewsPaiHangSpider(BaseSpider):
    """Scrape the top-10 "hot news" ranking list from each channel of
    The Paper (thepaper.cn) and yield one RankingDataItem per ranked
    article, enriched with the article body HTML.
    """

    name = "pengpai_news_spider_20180613_1423"
    # Site identifier used to tag every emitted item.
    site_id = 'bee482a6-6b2e-11e8-92cd-acbc32ce4b03'

    # Human-readable category name -> channel listing URL.
    site_map = {
        '时事': 'https://www.thepaper.cn/channel_25950',
        '财经': 'https://www.thepaper.cn/channel_25951',
        '思想': 'https://www.thepaper.cn/channel_25952',
        '问政': 'https://www.thepaper.cn/gov_publish.jsp',
        '生活': 'https://www.thepaper.cn/channel_25953',
    }
    # Hot-list hrefs are site-relative paths; format them onto the host.
    pengpai_news_base_url = 'https://www.thepaper.cn/%s'

    # Keep at most this many entries of each channel's hot list.
    max_rank_entries = 10

    def start_requests(self):
        """Issue one listing request per configured channel, passing the
        category name along in the request meta."""
        for category, url in self.site_map.items():
            yield scrapy.Request(url=url, callback=self.parse, meta={'category': category})

    def parse(self, response):
        """Parse a channel listing page.

        Reads the hot list (``ul#listhot0``), builds a partially filled
        RankingDataItem for each of the first ``max_rank_entries``
        entries, and yields a detail request that completes the item in
        :meth:`parse_detail`.
        """
        category = response.meta['category']
        self.log(category)
        category_id = utils.get_category_id(category)
        if not category_id:
            # Category is not in the mapping table -- nothing to tag items with.
            self.log('%s 不在分类表中，忽略~' % category)
            return
        news_items = response.xpath('//ul[@id="listhot0"]/li')
        for index, news_item in enumerate(news_items[:self.max_rank_entries]):
            href = news_item.xpath('./a/@href').extract_first()
            if not href:
                # Malformed list entry without a link: previously this built
                # the bogus URL ".../None" and issued a wasted request; skip
                # it but keep `index` so rank_num stays the list position.
                continue
            rank_data_item = RankingDataItem()
            rank_data_item['_id'] = str(uuid.uuid1())
            rank_data_item['create_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            rank_data_item['category_id'] = category_id
            rank_data_item['site_id'] = self.site_id
            rank_data_item['desc'] = news_item.xpath('./a/text()').extract_first()
            rank_data_item['url'] = self.pengpai_news_base_url % href
            rank_data_item['rank_num'] = index + 1
            yield scrapy.Request(url=rank_data_item['url'], callback=self.parse_detail,
                                 meta={'rank_data_item': rank_data_item})

    def parse_detail(self, response):
        """Complete the item from the article page.

        Attaches the article body HTML (``div.news_txt``) when present
        and records whether any content was found, then yields the
        finished item.
        """
        rank_data_item = response.meta['rank_data_item']
        content = response.xpath('//div[@class="news_txt"]').extract_first()
        if content:
            rank_data_item['has_content'] = True
            rank_data_item['content'] = content
        else:
            rank_data_item['has_content'] = False
        yield rank_data_item
