# -*- coding: UTF-8 -*-
import scrapy
import messAround.util.help as util


# Baidu Tieba (百度贴吧) hot-topic spider
# Endpoint: http://tieba.baidu.com/hottopic/browse/topicList
# Run with: scrapy crawl baidu_tieba
class BaiduSpider(scrapy.Spider):
    """Crawl Baidu Tieba's hot-topic list and yield one item per topic.

    Fetches http://tieba.baidu.com/hottopic/browse/topicList, parses the
    JSON body, and emits a dict per topic with its 1-based rank.
    Run with: ``scrapy crawl baidu_tieba``.
    """

    # Source identifier used downstream to tag where items came from.
    source = 2

    name = 'baidu_tieba'

    allowed_domains = ['tieba.baidu.com']

    start_urls = ['http://tieba.baidu.com']

    # Route this spider's items to the Baidu Tieba pipeline only.
    custom_settings = {
        'ITEM_PIPELINES': {
            'messAround.pipeline.baidu.BaiduTiebaTopicPipeline': 300
        }
    }

    def start_requests(self):
        """Issue the single request for the hot-topic list JSON."""
        url = 'http://tieba.baidu.com/hottopic/browse/topicList'
        yield scrapy.Request(url=url, headers=util.default_headers, callback=self.parse)

    def parse(self, response):
        """Parse the topic-list JSON and yield one dict per hot topic.

        ``no`` is the topic's 1-based rank in the response order.
        """
        result = util.make_json(response.text)
        # .get() avoids a KeyError if the API returns an unexpected payload.
        if result.get('errmsg') == "success":
            for rank, topic in enumerate(result['data']['bang_topic']['topic_list'], start=1):
                yield {
                    'source': self.source,
                    'no': rank,
                    'topic_id': topic['topic_id'],
                    'title': topic['topic_name'],
                    'full_title': topic['abstract'],
                    'link': topic['topic_url'],
                    'info': topic['topic_desc'],
                    'cover': topic['topic_pic'],
                    'discuss_num': topic['discuss_num'],
                    'create_time': topic['create_time'],
                }
