import uuid
from datetime import datetime

import scrapy

from rankinglistspider import utils
from rankinglistspider.items import RankingDataItem
from rankinglistspider.spiders.base_spider import BaseSpider


class PeopleNewsPaiHangSpider(BaseSpider):
    """Spider for the People's Daily (people.com.cn) news ranking page.

    Crawls the ranking list page, extracts the ranked article links per
    category, then follows each link to collect the article body.
    Each emitted item pairs title (``desc``) and ``content`` so the data
    decomposes cleanly downstream.
    """

    name = "people_news_spider_20180611_1243"
    site_id = 'bed67710-6b2e-11e8-89b6-acbc32ce4b03'

    def start_requests(self):
        """Yield the initial request for the ranking list page."""
        urls = [
            'http://news.people.com.cn/GB/28053/',
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Parse the ranking list and schedule one detail request per article.

        NOTE: the XPaths deliberately omit ``tbody`` — browser-copied XPaths
        insert it automatically, but the actual page source has none.
        Relative searches below start with ``.`` so they stay scoped to the
        current node instead of the whole document.
        """
        news_table = response.xpath('/html/body/center/table[5]/tr[1]/td[2]/table[4]/tr/td[1]')
        for news in news_table:
            # extract_first() may return None for a missing cell; coalesce to
            # '' so we never produce the literal string 'None' as a category
            # (the original str(...) wrapper did exactly that).
            raw_category = news.xpath('./table[1]/tr[1]/td[2]/text()').extract_first() or ''
            category = raw_category.replace('    ', '')
            self.log(category)
            category_id = utils.get_category_id(category)
            if not category_id:
                self.log('%s 不在分类表中，忽略~' % category)
                continue
            items = news.xpath('./table[2]/tr/td[2]/a')
            for index, item in enumerate(items):
                url = item.xpath('./@href').extract_first()
                if not url:
                    # An anchor without an href would make scrapy.Request
                    # raise on url=None and abort the whole parse; skip it.
                    continue
                rank_data_item = RankingDataItem()
                rank_data_item['_id'] = str(uuid.uuid1())
                rank_data_item['create_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                rank_data_item['category_id'] = category_id
                rank_data_item['site_id'] = self.site_id
                rank_data_item['desc'] = item.xpath('./text()').extract_first()
                rank_data_item['url'] = url
                # Rank numbers are 1-based.
                rank_data_item['rank_num'] = index + 1
                yield scrapy.Request(url=url, callback=self.parse_detail,
                                     meta={'rank_data_item': rank_data_item})

    def parse_detail(self, response):
        """Extract the article body and emit the completed ranking item.

        ``has_content`` records whether the expected content div was found,
        so consumers can distinguish "missing body" from "empty body".
        """
        rank_data_item = response.meta['rank_data_item']
        content = response.xpath('//div[@id="rwb_zw"]').extract_first()
        if content:
            rank_data_item['has_content'] = True
            rank_data_item['content'] = content
        else:
            rank_data_item['has_content'] = False
        yield rank_data_item
