import uuid
from datetime import datetime

import scrapy
from bs4 import BeautifulSoup

from rankinglistspider import utils
from rankinglistspider.items import RankingDataItem
from rankinglistspider.spiders.base_spider import BaseSpider


class IFengPaiHangSpider(BaseSpider):
    """Spider for the iFeng hot-news ranking page (news.ifeng.com/hotnews).

    Crawls the ranking index, yields one detail-page request per ranked
    entry, then fills in title/content from the detail page.
    """

    name = "ifeng_news_spider_20180611_1625"
    site_id = 'bed3d9a6-6b2e-11e8-897e-acbc32ce4b03'

    def start_requests(self):
        # Single entry point: the hot-news ranking index page.
        urls = [
            'http://news.ifeng.com/hotnews/',
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Parse the ranking index: one "boxTab" div per category, one
        table row per ranked item (row 0 is the table header).

        Yields a detail-page Request per item, carrying the partially
        filled RankingDataItem in request meta.
        """
        news_table = response.xpath('/html/body/div[contains(@class, "boxTab")]')
        for news in news_table:
            category = news.xpath('./div[1]/span/text()').extract_first()
            self.log(category)
            category_id = utils.get_category_id(category)
            if not category_id:
                self.log('%s 不在分类表中，忽略~' % category)
                continue
            news_items = news.xpath('./div[2]/div[1]/table/tr')
            for index, row_item in enumerate(news_items):
                # Row 0 is the header row, not a ranked entry; rank_num
                # therefore starts at 1.
                if index == 0:
                    continue
                desc = row_item.xpath('./td[2]/h3/a/text()').extract_first()
                url = row_item.xpath('./td[2]/h3/a/@href').extract_first()
                # Fix: validate desc AND url *before* building the item.
                # The original built the item first (wasted work) and never
                # checked url, so a missing href would crash scrapy.Request
                # with ValueError instead of being skipped.
                if not desc or not url:
                    continue
                rank_data_item = RankingDataItem()
                rank_data_item['_id'] = str(uuid.uuid1())
                rank_data_item['create_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                rank_data_item['category_id'] = category_id
                rank_data_item['site_id'] = self.site_id
                rank_data_item['desc'] = desc
                rank_data_item['url'] = url
                rank_data_item['rank_num'] = index
                yield scrapy.Request(url=url, callback=self.parse_detail,
                                     meta={'rank_data_item': rank_data_item})

    def parse_detail(self, response):
        """Extract the final title (and, for articles, the body content)
        from a detail page. Two known layouts are handled; any other
        layout yields nothing (item silently dropped, as before).
        """
        rank_data_item = response.meta['rank_data_item']
        # Hoist: the original extracted this XPath twice (condition + soup).
        main_content = response.xpath('//div[@id="main_content"]').extract_first()
        if response.xpath('//div[@class="hdpPic"]'):
            # Photo-gallery layout, e.g.
            # http://news.ifeng.com/a/20180602/58551477_0.shtml#p=1
            rank_data_item['desc'] = response.xpath('//div[@id="titL"]/h1/text()').extract_first()
            yield rank_data_item
        elif main_content:
            # Regular article layout.
            rank_data_item['desc'] = response.xpath('//h1[@id="artical_topic"]/text()').extract_first()
            soup = BeautifulSoup(main_content, "lxml")
            # Strip <script> tags; a plain loop replaces the original's
            # side-effect-only list comprehensions (soup([...]) returns a
            # list, so removing nodes while looping over it is safe).
            for node in soup(['script']):
                node.extract()
            if soup.div:
                rank_data_item['has_content'] = True
                rank_data_item['content'] = str(soup.div).replace('\n', '')
            else:
                rank_data_item['has_content'] = False
            yield rank_data_item