#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: xaoyaoyao
@contact: xaoyaoyao@aliyun.com
@file: iheima_spider.py
@time: 2018/08/25
"""

import random

from scrapy.http import Request
from scrapy.spiders import CrawlSpider
from scrapy.linkextractors import LinkExtractor
from article_spider.items import ArticleItem

class IheimaSpider(CrawlSpider):
    """Spider for iheima.com: follows article links from the listing pages
    in ``start_urls`` and scrapes each article into an ``ArticleItem``.

    NOTE(review): this subclasses ``CrawlSpider`` but defines no ``rules``
    and overrides ``parse``, which CrawlSpider uses internally for its rule
    machinery. With no rules this works like a plain ``scrapy.Spider``;
    consider switching the base class — confirm no external code relies on
    the CrawlSpider ancestry first.
    """

    name = 'iheima'
    allowed_domains = ['iheima.com']
    start_urls = [
        'http://www.iheima.com/',
        'http://www.iheima.com/scope/1',
        'http://www.iheima.com/scope/77',
        'http://www.iheima.com/scope/78',
        'http://www.iheima.com/scope/79',
        'http://www.iheima.com/scope/80',
        'http://www.iheima.com/scope/81',
        'http://www.iheima.com/scope/82',
        'http://www.iheima.com/scope/49',
        'http://www.iheima.com/scope/83',
        'http://www.iheima.com/scope/84',
        'http://www.iheima.com/scope/2',
        'http://www.iheima.com/scope/85',
        'http://www.iheima.com/scope/86',
        'http://www.iheima.com/scope/89',
        'http://www.iheima.com/scope/87',
    ]

    # Extractor for article-detail links in the listing grid. Built once at
    # class level instead of per-response (the original re-created it inside
    # parse() for every page).
    _detail_link_extractor = LinkExtractor(
        restrict_xpaths='//div[@class="list"]//div[contains(@class,"item-wrap")]'
                        '//div[contains(@class,"distable-cell")]//a[@class="title"]'
    )

    def parse(self, response):
        """Extract article links from a listing page and request each one.

        Yields a ``Request`` per detail link, handled by ``parse_detail``.
        Extraction errors are logged and swallowed so one bad listing page
        does not abort the crawl (preserves the original best-effort style).
        """
        self.logger.info('This is an item page! %s', response.url)
        try:
            # extract_links() returns [] when nothing matches, so no extra
            # truthiness guards are needed around the loop.
            for link in self._detail_link_extractor.extract_links(response):
                yield Request(url=link.url, callback=self.parse_detail)
        except Exception as e:
            self.logger.error("the link detail error. The msg %s", str(e))

    def parse_detail(self, response):
        """Scrape one article page into an ``ArticleItem`` and yield it."""
        article_item = ArticleItem()
        current_url = response.url
        self.logger.info('This is an item page! %s', current_url)
        article_item['url'] = current_url
        # Title: first matching text node, or '' when the selector finds none.
        rt_title = response.css('div.mainleft div.main-content div.title::text').extract()
        article_item['title'] = rt_title[0] if rt_title else ''
        # Content: one '\n'-terminated line per paragraph text node, with
        # full-width (ideographic) spaces removed. Join once and replace once
        # — the original re-ran replace() on the whole accumulated string on
        # every iteration, which is quadratic in article length.
        rt_content = response.css('div.mainleft div.main-content p::text').extract()
        article_item['content'] = ''.join(p + '\n' for p in rt_content).replace('　', '')
        article_item['type'] = '热点'
        article_item['name'] = 'i黑马网'
        article_item['date'] = None  # no publish-date selector yet; downstream gets None
        article_item['grade'] = random.randint(88, 95)
        article_item['domain'] = 'iheima.com'
        self.logger.info('article_item >> %s', article_item)
        yield article_item