#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author:xaoyaoyao
@file: geekpark_spider.py
@time: 2018/08/19
"""

import random

from scrapy.http import Request
from scrapy.spiders import CrawlSpider

from article_spider.items import ArticleItem


class GeekparkSpider(CrawlSpider):
    """Crawl geekpark.net column listing pages and scrape each article into an ArticleItem."""

    name = 'geekpark'
    allowed_domains = ['geekpark.net']
    start_urls = [
        'http://www.geekpark.net/',
        'http://www.geekpark.net/column/81',
        'http://www.geekpark.net/column/85',
        'http://www.geekpark.net/column/250',
        'http://www.geekpark.net/column/177',
        'http://www.geekpark.net/column/261',
        'http://www.geekpark.net/column/170',
    ]

    @staticmethod
    def _first(extracted, default=''):
        """Return the first element of a ``css(...).extract()`` result, or *default* when empty."""
        return extracted[0] if extracted else default

    def parse(self, response):
        """Collect article detail links from a listing page and schedule detail requests.

        :param response: listing-page response (one of ``start_urls``).
        :yields: ``Request`` objects with :meth:`parse_detail` as callback.
        """
        self.logger.info('This is an item page! %s', response.url)
        links = response.css('article.article-item a.img-cover-wrap::attr(href)').extract()
        self.logger.info('The links results >> %s', links)
        for detail_link in links:
            if detail_link:
                # urljoin handles both site-relative and absolute hrefs,
                # unlike naive string concatenation.
                yield Request(url=response.urljoin(detail_link),
                              callback=self.parse_detail)

    def parse_detail(self, response):
        """Extract a single article page into an ``ArticleItem``.

        :param response: article detail page response.
        :yields: one populated ``ArticleItem``.
        """
        article_item = ArticleItem()
        current_url = response.url
        self.logger.info('This is an item page! %s', current_url)
        article_item['url'] = current_url
        article_item['type'] = self._first(response.css(
            'header.post-header div.label.article-info a.category-tag::text').extract())
        article_item['title'] = self._first(response.css(
            'header.post-header h1.topic-title::text').extract())
        article_item['author'] = self._first(response.css(
            'header.post-header div.user-info a.author span::text').extract())
        # Fix: the original stored the raw extract() LIST here, unlike every
        # other field — take the first match so 'date' is a string too.
        article_item['date'] = self._first(response.css(
            'header.post-header div.user-info span.release-date::text').extract())
        article_item['brief'] = self._first(response.css(
            '#article-body div.topic-cover p::text').extract())
        rt_content = response.css('#article-body div.article-content p::text').extract()
        content = ''
        if rt_content:
            # join + one replace pass instead of += and replacing the whole
            # accumulated string on every iteration (accidental O(n^2)).
            # Trailing '\n' kept to match the original output format; the
            # replaced character is the ideographic space U+3000.
            content = ('\n'.join(rt_content) + '\n').replace('\u3000', '')
        article_item['content'] = content
        article_item['name'] = '极客公园'
        article_item['grade'] = random.randint(90, 98)
        article_item['domain'] = 'geekpark.net'
        self.logger.info('article_item >> %s', article_item)
        yield article_item
