import json
import logging
import scrapy
from crawler.items import*


class SpotSpider(scrapy.Spider):
    """Crawl POI ("spot") listings for Shanghai from place.qyer.com.

    Pages are fetched one at a time by POSTing to the ``list_json``
    endpoint. Pagination state (``current_page``) and pages that failed
    to fetch/parse (``failed_list``) are persisted to
    ``./crawler/spiders/<name>.json`` so a crawl can resume after a
    restart.
    """

    name = 'SpotSpider'
    allowed_domains = ['place.qyer.com']
    # urls[0]: city landing page (referer base); urls[1]: paginated JSON endpoint.
    urls = ['https://place.qyer.com/shanghai/', 'https://place.qyer.com/poi.php?action=list_json']
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
        'referer': 'https://place.qyer.com/shanghai/alltravel/'
    }
    current_page = 1  # page currently being requested (default; overridden by config)
    max_page = 100    # last page to request (default; overridden by config)
    size = 3          # pages per request batch
    form_data = {
        'page': '',          # filled in per request by start_requests()
        'type': 'city',
        'pid': '11595',      # presumably the place id for Shanghai — TODO confirm
        'sort': '0',
        'subsort': 'all',
        'isnominate': '-1',
        'haslastm': 'false',
        'rank': '2'
    }  # POST form for the list_json endpoint

    # Persisted crawl state, loaded from / saved to the JSON config file.
    config = {}
    custom_settings = {
        'ITEM_PIPELINES': {'crawler.pipelines.SpotPipeline': 300}
    }

    failed_list = []  # pages whose fetch/parse failed

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original never called super().__init__(), which
        # Scrapy spiders require for proper initialization (name/kwargs
        # handling). *args/**kwargs keep the signature backward-compatible.
        super().__init__(*args, **kwargs)
        # BUG FIX: bind per-instance state so instances do not mutate the
        # shared class-level mutables.
        self.config = {}
        self.failed_list = []
        self.opened()

    def save_config(self):
        """Persist current_page and failed_list to the spider's JSON config file."""
        self.config['current_page'] = self.current_page
        self.config['failed_list'] = self.failed_list
        with open(f'./crawler/spiders/{self.name}.json', 'w') as f:
            json.dump(self.config, f, indent=4, separators=(',', ': '))

    # Runs once, when the spider starts working.
    def opened(self):
        """Load persisted crawl state from the JSON config file.

        BUG FIX: the original opened the file *outside* the try/except,
        so a missing config file crashed the spider on first run, and the
        bare ``except`` swallowed even KeyboardInterrupt. The open/parse
        is now inside the try, catching only file and JSON errors.
        """
        config = {}
        try:
            with open(f'./crawler/spiders/{self.name}.json', 'r') as f:
                config = json.load(f)
            logging.info(f"爬虫:{self.name} 加载配置文件成功!")
        except (OSError, ValueError):
            # ValueError covers json.JSONDecodeError; fall back to defaults.
            logging.error(f"爬虫:{self.name} 加载配置文件失败!")
        self.current_page = config.get('current_page', 1)
        self.max_page = config.get('max_page', 2)
        logging.info("启动配置信息:")
        logging.info(f"current_page={self.current_page}")
        logging.info(f'max_page={self.max_page}')
        self.config = config

    # Runs once, when the spider finishes.
    def closed(self, reason):
        """Scrapy close hook: log final state and persist it to disk."""
        logging.info(f"爬虫:{self.name} 保存配置文件：")
        logging.info(f"current_page={self.current_page}")
        logging.info(f'max_page={self.max_page}')
        logging.info(f"failed_list={self.failed_list}")
        self.save_config()

    def start_requests(self):
        """Yield a POST request for the current page, if pages remain."""
        if self.current_page <= self.max_page:
            self.form_data['page'] = str(self.current_page)
            yield scrapy.FormRequest(url=self.urls[1], headers=self.headers, formdata=self.form_data)
            logging.info(f"正在爬取： {self.current_page}/{self.max_page}页")

    def parse(self, response):
        """Parse one JSON list page into SpotItems, then queue the next page."""
        try:
            obj = json.loads(response.text)
        except ValueError as e:
            # Narrowed from a blanket Exception: only JSON decoding can
            # fail here, and ValueError covers json.JSONDecodeError.
            logging.error(f"json解析出错！, error={repr(e)}")
            self.failed(response)
            return
        # BUG FIX: obj['error_code'] raised an unhandled KeyError when the
        # key was absent; a missing code now takes the failure path too.
        if obj.get('error_code') != 0:
            self.failed(response)
        else:
            spots = obj['data'].get('list', [])
            for each in spots:
                item = SpotItem()
                item['id'] = each['id']
                item['url'] = each['url']
                item['cnname'] = each['cnname']
                item['catename'] = each.get('catename', '')
                item['grade'] = each['grade']
                item['rank'] = each.get('rank', 0)
                yield item
            # Advance to the next page and issue its request.
            self.save_config()
            self.current_page = self.current_page + 1
            yield from self.start_requests()

    def failed(self, response):
        """Record the current page in the failure list and persist state."""
        page = self.current_page
        if page == 0:
            return
        logging.error(f"第{page}出错,加入失败列表")
        self.failed_list.append(page)
        self.save_config()