import json
import time
import uuid
from datetime import datetime

import scrapy
from bs4 import BeautifulSoup

from rankinglistspider import utils
from rankinglistspider.items import RankingDataItem
from rankinglistspider.spiders.base_spider import BaseSpider


class CCTVNewsPaiHangSpider(BaseSpider):
    """Spider that scrapes CCTV News per-category "roll" (ranking) lists.

    For each category in ``site_map`` it fetches an ``index.json`` endpoint,
    emits one :class:`RankingDataItem` per entry (rank = position in the
    list), then follows each article URL to fill in the cleaned page content.
    """

    name = "cctv_news_spider_20180612_1834"
    site_id = 'bee2f7cc-6b2e-11e8-8eea-acbc32ce4b03'

    # Category display name -> JSON endpoint serving that category's roll data.
    site_map = {
        '娱乐': 'http://news.cctv.com/ent/data/index.json',
        '国内': 'http://news.cctv.com/china/data/index.json',
        '国际': 'http://news.cctv.com/world/data/index.json',
        '军事': 'http://military.cctv.com/data/index.json',
        '科技': 'http://news.cctv.com/tech/data/index.json',
        '社会': 'http://news.cctv.com/society/data/index.json',
        '法治': 'http://news.cctv.com/law/data/index.json',
        '经济': 'http://jingji.cctv.com/data/index.json',
    }
    # NOTE(review): not referenced anywhere in this spider — looks like a
    # copy-paste leftover from a chinanews spider. Kept for backward
    # compatibility; confirm no external code reads it before removing.
    china_news_base_url = 'http://www.chinanews.com%s'

    def start_requests(self):
        """Yield one request per category, with a timestamp cache-buster."""
        for category, url in self.site_map.items():
            yield scrapy.Request(
                url=url + '?r=' + str(time.time()),
                callback=self.parse,
                meta={'category': category},
            )

    def parse(self, response):
        """Parse a category's roll JSON and yield a detail request per entry.

        The item's ``rank_num`` is the 1-based position in ``rollData``; the
        roll title is stored as ``desc`` and may be refined by
        :meth:`parse_detail`.
        """
        category = response.meta['category']
        category_id = utils.get_category_id(category)
        if not category_id:
            self.log('%s 不在分类表中，忽略~' % category)
            return
        # response.text replaces the deprecated body_as_unicode() (same value).
        news_json = json.loads(response.text)
        for index, news_item in enumerate(news_json['rollData']):
            rank_data_item = RankingDataItem()
            rank_data_item['_id'] = str(uuid.uuid1())
            rank_data_item['create_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            rank_data_item['category_id'] = category_id
            rank_data_item['site_id'] = self.site_id
            rank_data_item['desc'] = news_item['title']
            rank_data_item['url'] = news_item['url']
            rank_data_item['rank_num'] = index + 1
            yield scrapy.Request(url=rank_data_item['url'], callback=self.parse_detail,
                                 meta={'rank_data_item': rank_data_item})

    def parse_detail(self, response):
        """Extract the cleaned article body from a detail page.

        Strips scripts, headings, the share/"function" bar and ``o-tit``
        paragraphs from the ``cnt_bd`` container, then stores the remaining
        HTML (newlines removed) as ``content``. Video-style pages (e.g.
        http://news.cctv.com/2018/06/20/ARTIDmqiBJPTxAar3a7VSHE1180620.shtml)
        may have no usable body; then ``has_content`` is False.
        """
        rank_data_item = response.meta['rank_data_item']
        # Prefer the on-page headline, but keep the roll title if it's missing
        # (previously a missing headline clobbered desc with None).
        title = response.xpath('//div[@class="cnt_bd"]/h1/text()').extract_first()
        if title:
            rank_data_item['desc'] = title
        body_html = response.xpath('//div[@class="cnt_bd"]').extract_first()
        if body_html is None:
            # Page layout differs (no cnt_bd div) — previously this crashed
            # with TypeError inside BeautifulSoup.
            rank_data_item['has_content'] = False
            yield rank_data_item
            return
        soup = BeautifulSoup(body_html, "lxml")
        # soup(...) returns a fully materialized ResultSet, so extracting
        # while iterating is safe; plain loops replace the old side-effect
        # list comprehensions.
        for node in soup(['script', 'h1', 'h2']):
            node.extract()
        for node in soup('div', class_='function'):
            node.extract()
        for node in soup('p', class_='o-tit'):
            node.extract()
        if soup.div:
            rank_data_item['has_content'] = True
            rank_data_item['content'] = str(soup.div).replace('\n', '')
        else:
            rank_data_item['has_content'] = False
        yield rank_data_item
