import uuid
from datetime import datetime

import scrapy

from rankinglistspider import utils
from rankinglistspider.items import RankingDataItem
from rankinglistspider.spiders.base_spider import BaseSpider


class ChinaNewsPaiHangSpider(BaseSpider):
    """Spider for chinanews.com channel listing pages.

    For each configured channel it scrapes the top 10 headlines, emits one
    ``RankingDataItem`` per article, then follows the article link to attach
    the full HTML content when the body div is present.
    """

    name = "china_news_spider_20180612_1809"
    site_id = 'bee14e88-6b2e-11e8-ac08-acbc32ce4b03'

    # Channel display name -> channel listing-page URL.
    site_map = {
        '财经': 'http://finance.chinanews.com/cj/gd.shtml',
        '国内': 'http://www.chinanews.com/china.shtml',
        '国际': 'http://www.chinanews.com/world.shtml',
        '军事': 'http://www.chinanews.com/mil/news.shtml',
        '社会': 'http://www.chinanews.com/society.shtml',
        '娱乐': 'http://www.chinanews.com/entertainment.shtml',
        '体育': 'http://www.chinanews.com/ty/gun-news.html',
    }
    # Kept for backward compatibility with any external users of this attribute;
    # URL resolution now goes through response.urljoin (see parse()).
    china_news_base_url = 'http://www.chinanews.com%s'

    def start_requests(self):
        """Schedule one request per channel listing page."""
        # Only the URLs are needed; iterating .values() avoids discarding keys.
        for url in self.site_map.values():
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Parse a channel listing page.

        Yields one detail-page request per article (top 10 only), carrying a
        partially populated RankingDataItem in ``meta['rank_data_item']``.
        Skips the page entirely when its category is not in the mapping table.
        """
        category = response.xpath('//div[@id="content_right"]/div/h1/text()').extract_first()
        self.log(category)
        category_id = utils.get_category_id(category)
        if not category_id:
            self.log('%s 不在分类表中，忽略~' % category)
            return
        # Hoist the shared XPath prefix so title and href selectors stay in sync.
        anchor_xpath = ('//div[@id="content_right"]/div[@class="content_list"]'
                        '/ul/li[not(@id="konge")]/div[2]/a')
        descs = response.xpath(anchor_xpath + '/text()').extract()
        urls = response.xpath(anchor_xpath + '/@href').extract()
        # zip() guards against a title/href count mismatch (the original indexed
        # urls[index] and could raise IndexError); slicing keeps the top 10.
        for rank, (desc, href) in enumerate(zip(descs[:10], urls[:10]), start=1):
            rank_data_item = RankingDataItem()
            rank_data_item['_id'] = str(uuid.uuid1())
            rank_data_item['create_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            rank_data_item['category_id'] = category_id
            rank_data_item['site_id'] = self.site_id
            rank_data_item['desc'] = desc
            # urljoin resolves relative, protocol-relative and absolute hrefs
            # against the page actually being parsed; the old '%'-formatting
            # broke on anything but a root-relative path and forced the
            # www.chinanews.com host even for finance-subdomain listings.
            rank_data_item['url'] = response.urljoin(href)
            rank_data_item['rank_num'] = rank
            yield scrapy.Request(url=rank_data_item['url'], callback=self.parse_detail,
                                 meta={'rank_data_item': rank_data_item})

    def parse_detail(self, response):
        """Attach the article's HTML content to the item and yield it.

        Sets ``has_content`` False when the body div is missing; the image
        block is optional and simply omitted when absent.
        """
        rank_data_item = response.meta['rank_data_item']
        # Image block (may be absent on text-only articles).
        tupian_div = response.xpath('//div[@id="tupian_div"]').extract_first()
        # Article body.
        left_zw = response.xpath('//div[@class="left_zw"]').extract_first()
        if left_zw:
            # `tupian_div or ''` fixes the original bug where a missing image
            # div embedded the literal string "None" into the content markup.
            content = '<div id=content>%s%s</div>' % (tupian_div or '', left_zw)
            rank_data_item['has_content'] = True
            rank_data_item['content'] = content
        else:
            rank_data_item['has_content'] = False
        yield rank_data_item
