# -*- coding: utf-8 -*-
import scrapy
from air_history.items import AirHistoryItem
from air_history.settings import logger

class AreaSpiderSpider(scrapy.Spider):
    """Spider that crawls historical air-quality data from aqistudy.cn.

    Crawl flow (three levels):
        parse       -> city list page, yields one Request per city
        parse_month -> a city's month index, yields one Request per month
        parse_day   -> a month's daily table, yields one AirHistoryItem per row
    """

    name = 'area_spider'
    allowed_domains = ['aqistudy.cn']  # crawl stays within this domain
    base_url = "https://www.aqistudy.cn/historydata/"
    start_urls = [base_url]

    def parse(self, response):
        """Step 1: parse the city index page and schedule per-city requests.

        NOTE(review): the hardcoded single-city lists below are debug
        stand-ins; the real extraction xpath is kept in the comment.
        """
        logger.info('parse myprint area_spider.py 第一步')
        print('爬取城市信息....')
        # Full extraction (restore for production):
        # url_list = response.xpath("//div[@class='all']/div[@class='bottom']/ul/div[2]/li/a/@href").extract()
        # city_list = response.xpath("//div[@class='all']/div[@class='bottom']/ul/div[2]/li/a/text()").extract()
        url_list = ['monthdata.php?city=蚌埠']
        city_list = ['蚌埠']
        for url, city in zip(url_list, city_list):
            # spider --> engine --> request queued in the scheduler;
            # the city name rides along in meta so later callbacks can use it
            yield scrapy.Request(url=self.base_url + url,
                                 callback=self.parse_month,
                                 meta={'city': city})

    def parse_month(self, response):
        """Step 2: parse a city's month index and schedule per-month requests."""
        logger.info('parse_month myprint area_spider.py 第二步')
        print('爬取{}月份...'.format(response.meta['city']))
        # Full extraction (restore for production):
        # url_list = response.xpath('//tbody/tr/td/a/@href').extract()
        url_list = ['daydata.php?city=蚌埠&month=2014-12', 'daydata.php?city=蚌埠&month=2015-01', 'daydata.php?city=蚌埠&month=2015-02']
        for url in url_list:
            # spider --> engine --> scheduler queue; responses for these
            # urls are dispatched to parse_day
            yield scrapy.Request(url=self.base_url + url,
                                 callback=self.parse_day,
                                 meta={'city': response.meta['city']})

    def parse_day(self, response):
        """Step 3: parse one month's daily table; yield one item per data row."""
        logger.info('parse_day myprint area_spider.py 第三步')
        print('爬取最终数据...')
        node_list = response.xpath('//tr')
        logger.info('parse_day node_list myprint')
        logger.info(node_list)
        # Skip the first <tr> (table header row); slicing avoids mutating
        # the SelectorList in place.
        for node in node_list[1:]:
            # BUG FIX: a fresh item must be created for EACH row. The
            # original reused one AirHistoryItem across iterations, so all
            # yielded references could end up pointing at the last row's
            # data once any downstream component held the item.
            item = AirHistoryItem()
            item['data'] = node.xpath('./td[1]/text()').extract_first()  # date column
            item['city'] = response.meta['city']
            item['aqi'] = node.xpath('./td[2]/text()').extract_first()
            item['level'] = node.xpath('./td[3]/text()').extract_first()
            item['pm2_5'] = node.xpath('./td[4]/text()').extract_first()
            item['pm10'] = node.xpath('./td[5]/text()').extract_first()
            item['so2'] = node.xpath('./td[6]/text()').extract_first()
            item['co'] = node.xpath('./td[7]/text()').extract_first()
            item['no2'] = node.xpath('./td[8]/text()').extract_first()
            item['o3'] = node.xpath('./td[9]/text()').extract_first()
            yield item  # spider --> item pipeline