import uuid
from datetime import datetime

import scrapy

from rankinglistspider import utils
from rankinglistspider.items import RankingDataItem
from rankinglistspider.spiders.base_spider import BaseSpider


class WangYiNewsPaiHangSpider(BaseSpider):
    """Spider for the NetEase (163.com) news ranking pages.

    For every category in :attr:`site_map` it fetches the ranking page,
    takes the top 10 articles, and follows each article link to collect
    the title and body content into a :class:`RankingDataItem`.
    """

    name = "wangyi_news_spider_20180611_1642"
    # Site identifier recorded on every emitted item.
    site_id = 'bed8be3a-6b2e-11e8-a6a7-acbc32ce4b03'

    # Category display name (Chinese) -> ranking-page URL.
    site_map = {'新闻': 'http://news.163.com/special/0001386F/rank_news.html',
                '娱乐': 'http://news.163.com/special/0001386F/rank_ent.html',
                '体育': 'http://news.163.com/special/0001386F/rank_sports.html',
                '财经': 'http://money.163.com/special/002526BH/rank.html',
                '科技': 'http://news.163.com/special/0001386F/rank_tech.html',
                '汽车': 'http://news.163.com/special/0001386F/rank_auto.html',
                '女人': 'http://news.163.com/special/0001386F/rank_lady.html',
                '房产': 'http://news.163.com/special/0001386F/rank_house.html',
                '读书': 'http://news.163.com/special/0001386F/rank_book.html',
                '游戏': 'http://news.163.com/special/0001386F/game_rank.html',
                '旅游': 'http://news.163.com/special/0001386F/rank_travel.html',
                '教育': 'http://news.163.com/special/0001386F/rank_edu.html',
                '公益': 'http://news.163.com/special/0001386F/rank_gongyi.html',
                '校园': 'http://news.163.com/special/0001386F/rank_campus.html',
                '传媒': 'http://news.163.com/special/0001386F/rank_media.html',
                '视频': 'http://news.163.com/special/0001386F/rank_video.html',
                }

    def start_requests(self):
        """Issue one request per category, carrying the category name in meta."""
        for category, url in self.site_map.items():
            yield scrapy.Request(url=url, callback=self.parse, meta={'category': category})

    def parse(self, response):
        """Parse a ranking page and schedule a detail request per top-10 article.

        Skips the category entirely when ``utils.get_category_id`` does not
        know it, and skips individual rows whose anchor has no href (a None
        URL would make ``scrapy.Request`` raise and abort the whole page).
        """
        category = response.meta['category']
        self.log(category)
        category_id = utils.get_category_id(category)
        if not category_id:
            self.log('%s 不在分类表中，忽略~' % category)
            return
        # NOTE: no <tbody> in the XPath on purpose — browsers insert it for
        # standards compliance, but it is absent from the raw HTML served.
        # position()>1 drops the header row.
        news_items = response.xpath('/html/body/div[4]/div[2]/div/div[2]/table/tr[position()>1]')
        # Only the top 10 rows are wanted; slice before extracting so no
        # work is done for rows that would be discarded anyway.
        for index, news_item in enumerate(news_items[:10]):
            desc = news_item.xpath('./td/a/text()').extract_first()
            url = news_item.xpath('./td/a/@href').extract_first()
            if not url:
                # Row without a link cannot be followed — skip it.
                continue
            rank_data_item = RankingDataItem()
            # uuid1 is time/MAC based; kept for id-format compatibility with
            # existing stored records.
            rank_data_item['_id'] = str(uuid.uuid1())
            rank_data_item['create_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            rank_data_item['category_id'] = category_id
            rank_data_item['site_id'] = self.site_id
            rank_data_item['desc'] = desc
            rank_data_item['url'] = url
            rank_data_item['rank_num'] = index + 1
            yield scrapy.Request(url=url, callback=self.parse_detail,
                                 meta={'rank_data_item': rank_data_item})

    def parse_detail(self, response):
        """Complete the item carried in meta with the article title and body.

        Falls back to the ranking-page link text for ``desc`` when no title
        is found; always sets ``has_content`` so downstream consumers can
        rely on the flag being present.
        """
        rank_data_item = response.meta['rank_data_item']
        title = response.xpath('//div[@id="epContentLeft"]/h1[1]/text()').extract_first()
        if title:
            rank_data_item['desc'] = title
        content = response.xpath('//div[@id="endText"]').extract_first()
        if content:
            rank_data_item['has_content'] = True
            rank_data_item['content'] = content
        else:
            rank_data_item['has_content'] = False
        yield rank_data_item
