import uuid
from datetime import datetime

import scrapy

from rankinglistspider import utils
from rankinglistspider.items import RankingDataItem
from rankinglistspider.spiders.base_spider import BaseSpider


class SinaPiHangSpider(BaseSpider):
    """Spider for Sina's hot-news ranking page.

    Scrapes http://news.sina.com.cn/hotnews/, yielding one
    ``RankingDataItem`` per ranked article (top 10 rows per category
    block), then follows each article link to fill in title/content.
    """

    name = "sina_news_spider_20180611_1716"
    site_id = 'bedaaede-6b2e-11e8-b055-acbc32ce4b03'

    def start_requests(self):
        """Seed the crawl with the hot-news landing page.

        The ``PhantomJS`` meta flag asks the downloader middleware to
        render the page in a headless browser first, so the JavaScript-
        built ranking tables are present in the response body.
        """
        urls = [
            'http://news.sina.com.cn/hotnews/',
        ]
        for url in urls:
            request = scrapy.Request(url=url, callback=self.parse)
            request.meta['PhantomJS'] = True
            yield request

    def parse(self, response):
        """Parse the ranking page.

        Each ``<div class="loopblk">`` is one category section holding a
        table of ranked articles. For the top 10 data rows of each known
        category, a detail-page Request is yielded with a partially
        filled ``RankingDataItem`` carried in ``meta``.
        """
        news_table = response.xpath('//div[@class="loopblk"]')
        for news in news_table:
            category = news.xpath('./div[@class="lbti"]/h2/text()').extract_first()
            self.log(category)
            category_id = utils.get_category_id(category)
            if not category_id:
                self.log('%s 不在分类表中，忽略~' % category)
                continue
            # Keep the tbody in this XPath: the page is rendered by headless
            # PhantomJS, which — like any browser — inserts a <tbody> into
            # every <table>, so it IS present in the rendered source.
            # position()>1 skips the header row; slice to the top 10 entries.
            news_items = news.xpath('./div[2]/table/tbody/tr[position()>1]')
            for index, row_item in enumerate(news_items[:10]):
                desc = row_item.xpath('./td[2]/a/text()').extract_first()
                url = row_item.xpath('./td[2]/a/@href').extract_first()
                if not url:
                    # extract_first() may return None; scrapy.Request would
                    # raise ValueError on a missing URL, so skip this row.
                    self.log('row %d has no link, skipped' % (index + 1))
                    continue
                rank_data_item = RankingDataItem()
                # uuid1 is time-based, giving unique ids across rows/runs.
                rank_data_item['_id'] = str(uuid.uuid1())
                rank_data_item['create_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                rank_data_item['category_id'] = category_id
                rank_data_item['site_id'] = self.site_id
                rank_data_item['desc'] = desc
                rank_data_item['url'] = url
                rank_data_item['rank_num'] = index + 1
                yield scrapy.Request(url=url, callback=self.parse_detail,
                                     meta={'rank_data_item': rank_data_item})

    def parse_detail(self, response):
        """Parse an article detail page.

        Enriches the ``RankingDataItem`` carried in ``response.meta``:
        replaces ``desc`` with the article's real title when one is
        found, and stores the article body HTML (``has_content`` flags
        whether a body was present). Finally yields the finished item.
        """
        rank_data_item = response.meta['rank_data_item']
        title = response.xpath('//h1[@class="main-title"]/text()').extract_first()
        if title:
            rank_data_item['desc'] = title
        content = response.xpath('//div[@id="article"]').extract_first()
        if content:
            rank_data_item['has_content'] = True
            rank_data_item['content'] = content
        else:
            rank_data_item['has_content'] = False
        yield rank_data_item
