# -*- coding: utf-8 -*-
import scrapy
import json
import re

from baiduNewsSpider.items import BaidunewsspiderItem


class NewsbaiduSpider(scrapy.Spider):
    """Spider that fetches Baidu News widget JSON feeds and yields the
    URL of every "focus news" entry in each category widget.
    """
    name = 'newsbaidu'
    # allowed_domains entries must be bare domain names, not URLs:
    # a scheme/path here makes OffsiteMiddleware filter the requests.
    allowed_domains = ['news.baidu.com']
    start_urls = []

    def start_requests(self):
        """Build one GET request per news-category widget.

        Overrides the default start_requests (start_urls is empty).
        Uses plain scrapy.Request — no form data is posted, so
        FormRequest was unnecessary.
        """
        baseurl = 'http://news.baidu.com/widget?id={}&ajax=json&t=1565919256596'
        widget_ids = ['civilnews',
                      'InternationalNews',
                      'EnterNews',
                      'SportNews',
                      'FinanceNews',
                      'TechNews',
                      'MilitaryNews',
                      'InternetNews',
                      'DiscoveryNews',
                      'LadyNews',
                      'HealthNews',
                      'PicWall']
        for widget_id in widget_ids:
            yield scrapy.Request(baseurl.format(widget_id))

    def parse(self, response):
        """Parse a widget JSON response; yield one item per focus-news entry.

        Extracts the widget id back out of the request URL, looks up
        ``data.<widget_id>.focusNews`` in the JSON payload, and yields a
        BaidunewsspiderItem carrying each entry's ``m_url``.
        """
        match = re.match(r'.*?id=(.*?)&', response.url)
        if match is None:
            # URL carries no id parameter; nothing we can look up.
            return
        widget_id = match.group(1)
        payload = json.loads(response.text)
        # Guard each level: missing 'data'/widget/'focusNews' keys would
        # otherwise raise AttributeError on the chained .get() calls.
        news_list = (payload.get('data') or {}).get(widget_id, {}).get('focusNews') or []
        for news in news_list:
            yield BaidunewsspiderItem(news_url=news['m_url'])
