# -*- coding: utf-8 -*-
"""
增量爬取24小时内全球地震数据
"""
import ast
import json

import scrapy

from ..items import EarthquakeItem


class IncrementSpider(scrapy.Spider):
    """Incrementally crawl global earthquake records from the last 24 hours.

    Pages through the CEIC (China Earthquake Networks Center) AJAX
    speed-search endpoint and yields one ``EarthquakeItem`` per record.
    """

    name = 'increment'
    allowed_domains = ['www.ceic.ac.cn']
    # Base endpoint URL; the page number is appended per request.
    start_url = 'http://www.ceic.ac.cn/ajax/speedsearch?num=1&&page='
    # Number of result pages to crawl.
    MAX_PAGE = 1

    def start_requests(self):
        """Yield one request per result page (1..MAX_PAGE)."""
        # dont_filter=True so repeated incremental runs are not dropped by
        # Scrapy's duplicate-request filter.
        for page in range(1, self.MAX_PAGE + 1):
            yield scrapy.Request(
                '%s%d' % (self.start_url, page),
                callback=self.parse,
                dont_filter=True,
            )

    def parse(self, response):
        """Parse one page of earthquake records and yield EarthquakeItem objects.

        The response payload is a JSON-like object whose ``shuju`` key holds
        the list of earthquake records.
        """
        # SECURITY FIX: the original code ran eval() on the raw response body,
        # which executes arbitrary code from an untrusted network source.
        # Parse as JSON; fall back to ast.literal_eval for Python-literal-style
        # payloads (still safe: literal_eval never executes code).
        try:
            result = json.loads(response.text)
        except (ValueError, json.JSONDecodeError):
            result = ast.literal_eval(response.text)
        records = result['shuju']

        for record in records:
            item = EarthquakeItem()
            item['earthquake_level'] = record['M']
            item['earthquake_time'] = record['O_TIME']
            item['earthquake_lon'] = record['EPI_LON']
            item['earthquake_lat'] = record['EPI_LAT']
            item['earthquake_depth'] = record['EPI_DEPTH']
            item['earthquake_address'] = record['LOCATION_C']
            item['did'] = record['id']
            yield item