#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: xaoyaoyao
@contact: xaoyaoyao@aliyun.com
@file: bufanbiz_spider.py
@time: 2018/08/26
"""
import random
import json
import scrapy
import time
from scrapy.http import Request
from scrapy.spiders import CrawlSpider
from scrapy.linkextractors import LinkExtractor
from article_spider.items import ArticleItem

class BufanbizSpider(CrawlSpider):
    """Spider for bufanbiz.com.

    Walks the paginated article-list JSON API (pages 1-14 for every
    category in ``allowed_types``), then follows each article's detail
    page and yields one ``ArticleItem`` per article.

    NOTE(review): this spider overrides ``parse`` while inheriting from
    ``CrawlSpider``, which the Scrapy docs warn against (``CrawlSpider``
    uses ``parse`` internally for its rule machinery). It works here only
    because no ``rules`` are defined; ``scrapy.Spider`` would be the safer
    base class — left unchanged to avoid breaking external isinstance
    checks.
    """
    name = 'bufanbiz'
    allowed_domains = ['bufanbiz.com']
    # List-API URL template: p = 1-based page number, n = page size (20),
    # type = category filter ('' means all categories).
    start_urls = [
        'https://www.bufanbiz.com/api/website/articles/?p={}&n=20&type={}',
    ]
    allowed_types = [
        '', '新消费', '金融科技', '出行物流', '文娱', '人工智能', '人物', '干货', '其他'
    ]
    # Throttle via Scrapy's scheduler instead of calling time.sleep(1)
    # inside start_requests, which blocked the whole Twisted reactor
    # (every concurrent request stalls, not just this spider's next one).
    custom_settings = {'DOWNLOAD_DELAY': 1}

    def start_requests(self):
        """Yield one list-API request per (page, category) combination."""
        for page in range(1, 15):
            for article_type in self.allowed_types:
                url = self.start_urls[0].format(page, article_type)
                yield scrapy.Request(url=url, encoding='utf-8', callback=self.parse)

    def parse(self, response):
        """Parse a list-API JSON response and follow each article detail page.

        The API payload is expected to look like ``{"data": [{"uid": ...},
        ...]}`` — TODO confirm against a live response; a missing or
        malformed payload is logged and skipped so the crawl continues.
        """
        self.logger.info('[BufanbizSpider] This is an item page! %s', response.url)
        try:
            result = response.body.decode()
            self.logger.info('[BufanbizSpider] The results >> %s ', result)
            data = json.loads(result)['data']
        except (UnicodeDecodeError, ValueError, KeyError, TypeError) as e:
            # ValueError covers json.JSONDecodeError; keep the spider
            # alive on a bad page rather than aborting the whole crawl.
            self.logger.error("[BufanbizSpider] the link detail error. The msg %s", str(e))
            return
        for entry in data or []:
            uid = entry.get('uid')
            if uid:
                url = 'https://www.bufanbiz.com/post/{}.html'.format(uid)
                yield Request(url=url, callback=self.parse_detail)

    @staticmethod
    def _first(response, query, default=''):
        """Return the first text node matched by CSS *query*, else *default*."""
        texts = response.css(query).extract()
        return texts[0] if texts else default

    def parse_detail(self, response):
        """Scrape a single article detail page into an ``ArticleItem``.

        Missing fields default to '' (None for the date), matching the
        behavior of the original per-field extraction.
        """
        article_item = ArticleItem()
        self.logger.info('This is an item page! %s', response.url)
        article_item['url'] = response.url
        article_item['title'] = self._first(
            response, 'div.detail-content h1.title::text')
        # Join all paragraph fragments with trailing newlines, then strip
        # full-width spaces in a single pass (the original re-scanned the
        # entire accumulated string on every loop iteration).
        fragments = response.css(
            'div.detail-content div.content-detail span::text').extract()
        content = ''.join(str(c) + '\n' for c in fragments).replace('　', '')
        article_item['content'] = content
        article_item['author'] = self._first(
            response,
            'div.detail-content div.author-share div.author span.name_time span.author-name::text')
        article_item['date'] = self._first(
            response,
            'div.detail-content div.author-share div.author span.name_time span.time::text',
            default=None)
        article_item['type'] = self._first(
            response,
            'div.detail-content div.author-share div.author span.article-type::text')
        article_item['brief'] = self._first(
            response, 'div.detail-content div.content-lead::text')
        article_item['name'] = '不凡商业'
        article_item['grade'] = random.randint(88, 95)
        article_item['domain'] = 'bufanbiz.com'
        self.logger.info('article_item >> %s', article_item)
        yield article_item