# -*- coding: utf-8 -*-
import scrapy
import json
from news.items import *
from news.utils import *
import datetime
import hashlib
class ToutiaoSpider(scrapy.Spider):
    """Crawl Toutiao's search API for housing-rental keywords and yield NewsItem objects.

    Flow: start_requests() hits the search_content JSON endpoint per keyword,
    parse() records each result's list-level metadata and follows the article
    page, parse_page() merges page fields with the recorded metadata.
    """
    name = 'toutiao'
    allowed_domains = ['toutiao.com']
    start_urls = ['http://toutiao.com/']
    # Housing/rental search terms (rental housing, serviced apartments, etc.).
    keywords = ['租赁租房', '公寓租房', '长租公寓', '人才公寓', '出租住房', '公寓运营', '集中式公寓', '酒店式公寓', '住房租赁']
    custom_settings = {
        'DOWNLOAD_DELAY': 1.18,
        'CONCURRENT_REQUESTS': 2,
    }
    # Pagination upper bound; the search API pages in steps of 20.
    # With the default of 20 only offset 0 is requested (one page per
    # keyword), which matches the original hard-coded behavior — raise it
    # to crawl deeper into the result list.
    max_offset = 20
    # md5(title) -> raw search-result entry, so parse_page can merge
    # list-level metadata (image_url, abstract, create_time) into the item.
    list_item_dict = {}

    def start_requests(self):
        """Issue one search-API request per (keyword, offset) pair."""
        for kw in self.keywords:
            for offset in range(0, self.max_offset, 20):
                url = 'https://www.toutiao.com/search_content/?offset=%s&format=json&autoload=true&count=20&cur_tab=1&from=search_tab&keyword=%s' % (offset, kw)
                yield scrapy.Request(url, self.parse)

    def parse(self, response):
        """Parse the JSON search response and follow each result's article page.

        Stashes the raw listing entry in list_item_dict keyed by md5(title)
        so parse_page can recover list-level metadata later.
        """
        content = json.loads(response.body)
        # .get(): an error/empty API response without 'data' must not crash
        # the callback.
        for entry in content.get('data', []):
            try:
                # source_url looks like '/group/<id>/'; the canonical article
                # page lives at '/a<id>'.
                segs = entry['source_url'].split('/')
                url = '/a' + segs[2]
                key = md5(entry['title'])
                self.list_item_dict[key] = entry
                yield response.follow(url, self.parse_page)
            except Exception as e:
                # Some entries (ads, galleries) lack these fields; skip them,
                # but leave a trace instead of swallowing silently.
                self.logger.debug('skipping search entry: %r', e)

    def parse_page(self, response):
        """Extract article fields embedded in the page's inline <script> config.

        Prefers list-level metadata recorded by parse(); falls back to the
        page's own inline values when no listing entry is found (e.g. a
        duplicate title overwrote the dict slot). Items without content are
        dropped.
        """
        scripts = response.css('script')
        news = NewsItem()
        news['title'] = scripts.re_first(r'\s*title\:\s?\'(.*)\'')
        news['content'] = scripts.re_first(r'\s*content\:\s?\'(.*)\'')
        news['publisher'] = scripts.re_first(r'\s*source\:\s?\'(.*)\'')
        news['source_from'] = 'toutiao'
        news['crawl_date'] = datetime.datetime.now()

        key = md5(news['title'])
        # .get(): an unmatched key must not KeyError and kill the callback.
        list_item = self.list_item_dict.get(key)
        if list_item is not None:
            news['img'] = list_item['image_url']
            news['desc'] = list_item['abstract']
            news['publish_date'] = int(list_item['create_time'])
        else:
            # Fallback to the page's inline values (the original code
            # extracted these unconditionally, then overwrote them).
            news['img'] = scripts.re_first(r'\s*avatarUrl\:\s?\'(.*)\'')
            news['publish_date'] = scripts.re_first(r'\s*time\:\s?\'(.*)\'')

        if news['content'] is not None:
            yield news
