#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: xaoyaoyao
@contact: xaoyaoyao@aliyun.com
@file: toutiao_spider.py
@time: 2018/08/26
"""

import random

from scrapy.http import Request
from scrapy.linkextractors import LinkExtractor

from article_spider.items import ArticleItem
from article_spider.spiders.dynamic.js_spider import JSSpider


class ToutiaoSpider(JSSpider):
    """Spider for toutiao.com news channels.

    Crawls a fixed list of channel listing pages, extracts links to article
    detail pages, and parses each article into an ``ArticleItem``
    (url, title, content, author, date, type, name, grade, domain).
    """

    name = 'toutiao'
    allowed_domains = ['toutiao.com']
    start_urls = [
        'https://www.toutiao.com/ch/news_hot/',
        'https://www.toutiao.com/ch/news_tech/',
        'https://www.toutiao.com/ch/news_travel/',
        'https://www.toutiao.com/ch/news_game/',
        'https://www.toutiao.com/ch/news_car/',
        'https://www.toutiao.com/ch/news_finance/',
        'https://www.toutiao.com/ch/funny/',
        'https://www.toutiao.com/ch/news_discovery/',
        'https://www.toutiao.com/ch/news_food/',
        'https://www.toutiao.com/ch/news_essay/',
        'https://www.toutiao.com/ch/news_fashion/',
        'https://www.toutiao.com/ch/news_sports/',
        'https://www.toutiao.com/ch/news_baby/',
        'https://www.toutiao.com/ch/news_entertainment/',
    ]

    # XPath that locates article anchors inside the channel feed list.
    _ITEM_LINK_XPATH = (
        '//div[@class="wcommonFeed"]//ul/li[contains(@class, "item")]'
        '//div[@class="rbox-inner"]/div[@class="title-box"]/a'
    )

    def parse(self, response):
        """Extract article links from a channel listing page.

        Yields a :class:`scrapy.http.Request` per extracted link, handled by
        :meth:`parse_detail`. Extraction errors are logged, not raised, so
        one bad listing page does not abort the crawl.
        """
        self.logger.info('[ToutiaoSpider] This is an item page! %s', response.url)
        try:
            extractor = LinkExtractor(restrict_xpaths=self._ITEM_LINK_XPATH)
            links = extractor.extract_links(response)
            if links:
                self.logger.info('[ToutiaoSpider] The links >> %s ', links)
                for detail_link in links:
                    yield Request(url=detail_link.url, callback=self.parse_detail)
        except Exception as e:
            # Best-effort: keep crawling other channels even if this page
            # fails to parse (deliberate broad catch, mirrors original intent).
            self.logger.error("[ToutiaoSpider] the link detail error. The msg %s", str(e))

    def parse_detail(self, response):
        """Parse an article detail page into an ``ArticleItem`` and yield it.

        Missing fields degrade gracefully: title/content/author/type fall
        back to empty strings, date to ``None``.
        """
        article_item = ArticleItem()
        current_url = response.url
        self.logger.info('This is an item page! %s', current_url)
        article_item['url'] = current_url

        # Title: first matching text node, or empty string when absent.
        rt_title = response.css('div.article-box h1.article-title::text').extract()
        article_item['title'] = rt_title[0] if rt_title else ''

        # Content: newline-terminate each paragraph, then strip full-width
        # (ideographic) spaces once over the joined text. Single join replaces
        # the original quadratic `+=` loop; result is identical.
        rt_content = response.css('div.article-box div.article-content p::text').extract()
        content = ''
        if rt_content:
            content = ''.join(str(c_text) + '\n' for c_text in rt_content)
            content = content.replace('　', '')
        article_item['content'] = content

        # Author and date: the last two <span> text nodes, in that order.
        # The length guard makes the negative indexing safe — no try/except needed.
        rt_author_date = response.css('div.article-box div.article-sub span::text').extract()
        author = ''
        _date = None
        if len(rt_author_date) > 1:
            author = rt_author_date[-2]
            _date = rt_author_date[-1]
        article_item['author'] = author
        article_item['date'] = _date

        # Category: second anchor text. The original indexed [1] after only a
        # non-empty check, raising IndexError on a single match — guard fixed.
        rt_type = response.css('div.bui-left.chinese-tag a::text').extract()
        article_item['type'] = rt_type[1] if len(rt_type) > 1 else ''

        article_item['name'] = '今日头条'
        article_item['grade'] = random.randint(88, 95)
        article_item['domain'] = 'toutiao.com'
        self.logger.info('article_item >> %s', article_item)
        yield article_item
