#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2018/1/18 0018 9:44
# @Author  : Arliki
# @email   : hkdnxycz@outlook.com
# @File    : for_xiuhu
from sct2.items import XiuhuItem
import scrapy


class Xiuhu(scrapy.Spider):
    """Spider for huxiu.com: scrapes the news list on the index page,
    then follows each article link to collect its publish time.

    Yields ``XiuhuItem`` objects with ``title``, ``content`` (summary),
    ``url`` and ``sendtime`` fields.
    """
    name = 'huxiu'
    allowed_domains = ["huxiu.com"]
    start_urls = [
        "http://www.huxiu.com/index.php"
    ]

    def parse(self, response):
        """Parse the index page.

        For every news box, build a partially-filled ``XiuhuItem``
        (title, summary, relative URL) and schedule a request to the
        article page so :meth:`article` can add the publish time.

        :param response: response for ``start_urls`` index page
        :yields: :class:`scrapy.Request` objects carrying the item
        """
        for box in response.xpath('//div[@class="mod-info-flow"]/div/div[@class="mob-ctt"]'):
            item = XiuhuItem()
            item['title'] = box.xpath('h2/a/text()')[0].extract()
            try:
                item['content'] = box.xpath('div[@class="mob-sub"]/text()')[0].extract()
            except IndexError:
                # Some boxes carry no summary node; fall back to a
                # placeholder rather than dropping the whole item.
                item['content'] = "无法获取到简介"
            item['url'] = box.xpath('h2/a/@href')[0].extract()
            url = response.urljoin(item['url'])
            # Wrap the item under its own key: response.meta also carries
            # Scrapy-internal entries (depth, download_timeout, ...) which
            # must not leak into the yielded item.
            yield scrapy.Request(url, callback=self.article, meta={'item': item})

    def article(self, response):
        """Parse an article page and complete the item started in parse().

        Fills in ``sendtime`` (publish time) and the final (absolute)
        ``url``; recovers the title from the article page when the index
        page provided an empty one.

        :param response: article-page response; ``response.meta['item']``
            holds the partially-filled item
        :yields: the completed :class:`XiuhuItem`
        """
        detail = response.xpath('//div[@class="article-wrap"]')
        item = response.meta['item']
        print("--------------------------------------新闻详情")
        if not item['title']:
            # Index page gave no title; take it from the article header.
            item['title'] = detail.xpath('h1/text()')[0].extract().strip()
        item['url'] = response.url
        try:
            item['sendtime'] = \
            detail.xpath('div[@class="article-author"]/div/span[@class="article-time pull-left"]/text()')[0].extract()
        except IndexError:
            # Time node missing from the page layout; keep a placeholder.
            item['sendtime'] = "未获取到时间"
        print(item['title'], item['url'], item['sendtime'], item['content'])
        yield item
