#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author:xaoyaoyao
@file: duanwenxue_spider.py
@time: 2018/08/19
"""
import re
import random
from scrapy.spiders import CrawlSpider
from scrapy.selector import Selector
from scrapy.http import Request
from article_spider.items import ArticleItem


class DuanWenXueSpider(CrawlSpider):
    """Crawl duanwenxue.com category listing pages and scrape articles.

    ``parse`` walks each listing page, following the pagination link and
    queueing every article detail page it finds; ``parse_detail`` extracts
    a single article into an ``ArticleItem``.
    """
    name = 'duanwenxue'
    allowed_domains = ['duanwenxue.com']
    start_urls = [
        # 'https://www.duanwenxue.com/qinggan/meiwen/',
        # 'https://www.duanwenxue.com/yulu/gaoxiao/',
        # 'https://www.duanwenxue.com/sanwen/suibi/',
        # 'https://www.duanwenxue.com/shanggan/ganrengushi/',
        # 'https://www.duanwenxue.com/qinggan/aiqing/',
        # 'https://www.duanwenxue.com/diary/suibi/',
        'https://www.duanwenxue.com/sanwen/shuqing/',
        'https://www.duanwenxue.com/shige/sanwenshi/',
        'https://www.duanwenxue.com/shige/aiqingshiju/',
        'https://www.duanwenxue.com/sanwen/jingdian/',
        'https://www.duanwenxue.com/shige/aiguo/',
        'https://www.duanwenxue.com/diary/wenzi/',
        'https://www.duanwenxue.com/qinggan/youqing/',
        'https://www.duanwenxue.com/z/xjfmdmy/',
        'https://www.duanwenxue.com/qinggan/qinqing/',
        'https://www.duanwenxue.com/shige/aiqing/',
        'https://www.duanwenxue.com/qinggan/riji/',
        'https://www.duanwenxue.com/diary/jili/',
        'https://www.duanwenxue.com/diary/youxian/',
        'https://www.duanwenxue.com/qinggan/meiwen/',
        'https://www.duanwenxue.com/shige/xiandai/',
        'https://www.duanwenxue.com/shanggan/riji/',
        'https://www.duanwenxue.com/huayu/',
        'https://www.duanwenxue.com/jingdian/gushi/',
        'https://www.duanwenxue.com/rizhi/shangganwenzi/',
        'https://www.duanwenxue.com/jingdian/ganwu/',
        'https://www.duanwenxue.com/jingdian/zheli/',
        'https://www.duanwenxue.com/yuju/yulu/',
        'https://www.duanwenxue.com/huayu/biaobai/',
        'https://www.duanwenxue.com/juzi/beishang/',
        'https://www.duanwenxue.com/yulu/gaoxiao/',
        'https://www.duanwenxue.com/yulu/aiqingxuanyan/',
        'https://www.duanwenxue.com/juzi/biaobai/',
        'https://www.duanwenxue.com/duanwen/geyan/',
        'https://www.duanwenxue.com/yulu/yijuhua/',
        'https://www.duanwenxue.com/huayu/lizhi/',
        'https://www.duanwenxue.com/yulu/shangxinqianming/',
        'https://www.duanwenxue.com/huayu/zheli/',
        'https://www.duanwenxue.com/shanggan/rizhi/',
        'https://www.duanwenxue.com/diary/suigan/',
        'https://www.duanwenxue.com/diary/shige/',
        'https://www.duanwenxue.com/shanggan/ganrengushi/',
        'https://www.duanwenxue.com/qinggan/gushi/',
        'https://www.duanwenxue.com/qinggan/qingganmeiwen/',
        'https://www.duanwenxue.com/sanwen/shanggan/',
        'https://www.duanwenxue.com/shanggan/gushi/',
        'https://www.duanwenxue.com/shige/gelvshi/',
        'https://www.duanwenxue.com/diary/sinian/',
        'https://www.duanwenxue.com/z/xfylxqss/',
        'https://www.duanwenxue.com/diary/ganwu/',
        'https://www.duanwenxue.com/z/zaljdyl/',
        'https://www.duanwenxue.com/shige/gucifengyun/',
        'https://www.duanwenxue.com/diary/jimo/',
        'https://www.duanwenxue.com/diary/wuliao/',
        'https://www.duanwenxue.com/diary/kuaile/',
        'https://www.duanwenxue.com/sanwen/youmei/',
        'https://www.duanwenxue.com/sanwen/aiqing/',
        'https://www.duanwenxue.com/shige/daquan/',
        'https://www.duanwenxue.com/diary/nanguo/',
        'https://www.duanwenxue.com/sanwen/shuqing/',
        'https://www.duanwenxue.com/diary/ganshang/',
        'https://www.duanwenxue.com/diary/xingfu/',
    ]

    def parse(self, response):
        """
        Parse a listing page.

        Follows the "next page" pagination link (recursively into ``parse``)
        and yields one ``Request`` per article detail link found on the page.

        :param response: listing-page response
        :return: generator of ``Request`` objects
        """
        response_selector = Selector(response=response)
        current_url = response.url
        self.logger.info('This is an item page! %s', current_url)
        # Pagination link: the <li> immediately after the highlighted
        # ("thisclass") current-page entry.  extract_first() returns the
        # first match or None, replacing the manual list/str handling.
        next_page = response_selector.xpath(
                u'//div[@class="list-pages"]/ul/li[@class="thisclass"]/following-sibling::li[1]/a/@href').extract_first()
        self.logger.info('The next link result %s', next_page)
        if next_page:
            if current_url.endswith('.html'):
                # Swap the trailing "<name>_<page>.html" segment for the next
                # page's file name.  The original pattern r'\w+_\d+.html' left
                # the dot unescaped and unanchored; a callable replacement also
                # keeps any backslash in next_page literal instead of being
                # parsed as a regex group reference.
                next_url = re.sub(r'\w+_\d+\.html$', lambda _m: next_page, current_url)
            else:
                # Category index pages end with '/', so the relative file name
                # is simply appended.
                next_url = current_url + next_page
            self.logger.info('The next item page! %s', next_url)
            yield Request(url=next_url, callback=self.parse)

        # Article links; some categories use the "short article" list layout,
        # so fall back to that selector when the base layout finds nothing.
        detail_links = response_selector.xpath(
                u'//div[@class="row-left"]//div[@class="list-base-article"]/ul/li//a/@href').extract()
        self.logger.info('The detail results %s', detail_links)
        if not detail_links:
            detail_links = response_selector.xpath(
                    u'//div[@class="row-left"]//div[@class="list-short-article"]/ul/li/p/a/@href').extract()
            self.logger.info('The detail2 results %s', detail_links)
        for detail_link in detail_links:
            if detail_link:
                # urljoin resolves both site-relative paths and absolute URLs,
                # unlike the original hard-coded domain concatenation.
                yield Request(url=response.urljoin(detail_link), callback=self.parse_detail)

    def parse_detail(self, response):
        """
        Parse an article detail page into an ``ArticleItem``.

        :param response: detail-page response
        :return: generator yielding a single ``ArticleItem``
        """
        article_item = ArticleItem()
        response_selector = Selector(response=response)
        current_url = response.url
        self.logger.info('This is an item page! %s', current_url)
        article_item['url'] = current_url
        # Body paragraphs: each followed by '\n' (trailing newline kept, as
        # before) with full-width ideographic spaces stripped once, instead of
        # re-scanning the whole accumulated string on every iteration.
        rt_content = response_selector.xpath(
                u'//div[@class="row-article"]//div[contains(@class,"article-content")]/p/text()').extract()
        article_item['content'] = ''.join(
                str(c_text) + '\n' for c_text in rt_content).replace('　', '')
        article_item['title'] = response_selector.xpath(
                u'//div[@class="row-left"]/div[@class="row-article"]/h1/text()').extract_first(default='')
        article_item['author'] = response_selector.xpath(
                u'//div[@class="article-writer"]/div[@class="face"]/a/span/text()').extract_first(default='')
        # Category: the last breadcrumb entry.
        article_item['type'] = response_selector.xpath(
                u'//div[@class="breadcrumb"]/a[last()]/text()').extract_first(default='')
        article_item['name'] = '短文学'
        # No publish date on the page is extracted here.
        article_item['date'] = None
        # No real score exists on the page; keep the original random placeholder.
        article_item['grade'] = random.randint(80, 95)
        article_item['domain'] = 'duanwenxue.com'
        self.logger.info('article_item >> %s', article_item)
        yield article_item
