import uuid
from datetime import datetime

import scrapy
from bs4 import BeautifulSoup

from rankinglistspider import utils
from rankinglistspider.items import RankingDataItem
from rankinglistspider.spiders.base_spider import BaseSpider


class SouhuNewsPaiHangSpider(BaseSpider):
    """Spider that scrapes ranked news headlines from Sohu channel pages.

    For each channel in ``site_map`` it requests the channel front page,
    builds one :class:`RankingDataItem` per headline in the ranking list,
    then follows each headline link to attach the article body.
    """

    name = "souhu_news_spider_20180612_1804"
    site_id = 'bee04948-6b2e-11e8-9933-acbc32ce4b03'

    # Channel display name (Chinese) -> channel front-page URL.
    site_map = {
        '军事': 'http://mil.sohu.com/',
        '财经': 'http://business.sohu.com/',
        '时尚': 'http://fashion.sohu.com/',
        '科技': 'http://it.sohu.com/',
        '旅游': 'http://travel.sohu.com/',
        '游戏': 'http://game.sohu.com/',
        '社会': 'http://society.sohu.com/',
    }

    def start_requests(self):
        """Issue one request per channel, carrying the channel name in meta."""
        for category, url in self.site_map.items():
            yield scrapy.Request(url=url, callback=self.parse, meta={'category': category})

    def parse(self, response):
        """Parse a channel page.

        Yields a detail-page request per headline, with a partially filled
        ``RankingDataItem`` in ``meta`` for :meth:`parse_detail` to complete.
        """
        category = response.meta['category']
        category_id = utils.get_category_id(category)
        if not category_id:
            self.log('%s 不在分类表中，忽略~' % category)
            return
        news_items = response.xpath('//div[@class="news-list clearfix"]/div/h4/a')
        for index, news_item in enumerate(news_items):
            # extract_first() returns None for empty nodes; guard so .strip()
            # cannot raise AttributeError, and skip entries without an href.
            desc = (news_item.xpath('string(.)').extract_first() or '').strip()
            url = news_item.xpath('./@href').extract_first()
            if not url:
                continue
            rank_data_item = RankingDataItem()
            rank_data_item['_id'] = str(uuid.uuid1())
            rank_data_item['create_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            rank_data_item['category_id'] = category_id
            rank_data_item['site_id'] = self.site_id
            rank_data_item['desc'] = desc
            # Hrefs here are typically scheme-relative ('//www.sohu.com/...').
            # urljoin handles that form AND already-absolute URLs, whereas the
            # old 'http:' + url produced 'http:http://...' for absolute hrefs.
            rank_data_item['url'] = response.urljoin(url)
            rank_data_item['rank_num'] = index + 1  # ranks are 1-based
            yield scrapy.Request(url=rank_data_item['url'], callback=self.parse_detail,
                                 meta={'rank_data_item': rank_data_item})

    def parse_detail(self, response):
        """Parse an article page: strip boilerplate nodes from the article
        body and yield the completed ``RankingDataItem``.
        """
        rank_data_item = response.meta['rank_data_item']
        # response.text is the body decoded with Scrapy's detected encoding —
        # equivalent to the manual body.decode(response.encoding), but safer.
        soup = BeautifulSoup(response.text, "lxml")
        content_tag = soup.find('article', class_='article')
        if not content_tag:
            self.log('不支持此种类型的详情页数据抓取.')
            return
        # Drop the "back to sohu" link and the editor-name footer before
        # serializing the article body.
        for node in content_tag('a', id='backsohucom'):
            node.extract()
        for node in content_tag('p', attrs={'data-role': 'editor-name'}):
            node.extract()
        rank_data_item['has_content'] = True
        rank_data_item['content'] = str(content_tag).replace('\n', '')
        yield rank_data_item
