import json
import logging
import re
import scrapy
from crawler.items import*
import crawler.db as db

class QunarSpotSpider(scrapy.Spider):
    """Crawl scenic-spot listing pages for Shanghai from travel.qunar.com.

    Pages are fetched sequentially, one request at a time. Progress
    (current page and the list of failed pages) is persisted to
    ``config/<name>.json`` so an interrupted crawl can resume where it
    left off. Extracted items go through ``SpotPipeline``.
    """

    name = 'QunarSpotSpider'
    allowed_domains = ['travel.qunar.com']
    # %d is replaced with the 1-based page number.
    urls = ['https://travel.qunar.com/p-cs299878-shanghai-jingdian-3-%d']
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
        'referer': 'http://travel.qunar.com/place/'
    }
    current_page = 1  # page currently being crawled (default; overridden by config)
    max_page = 2  # last page to crawl (default; overridden by config)

    custom_settings = {
        'ITEM_PIPELINES': {'crawler.pipelines.SpotPipeline': 300}
    }

    def __init__(self, *args, **kwargs):
        # BUGFIX: call the Spider base initializer so scrapy can finish
        # setting the spider up (name, crawl kwargs, logger).
        super().__init__(*args, **kwargs)
        # BUGFIX: mutable state (dict/list) now lives on the instance,
        # not the class, so two instances cannot share/corrupt it.
        self.config = {}       # persisted configuration, loaded in opened()
        self.failed_list = []  # pages whose crawl failed
        self.db = db.connect('tour_qunar')
        self.opened()

    def _config_file(self):
        """Return the path of this spider's persisted JSON config file."""
        from os import path
        return f'{path.dirname(__file__)}/config/{self.name}.json'

    def save_config(self):
        """Persist crawl progress (current page + failed pages) to disk."""
        self.config['current_page'] = self.current_page
        self.config['failed_list'] = self.failed_list
        with open(self._config_file(), 'w') as f:
            json.dump(self.config, f, indent=4, separators=(',', ': '))

    # Runs once, when the spider starts working.
    def opened(self):
        """Load the persisted configuration, falling back to defaults."""
        try:
            # BUGFIX: the open() itself is now inside the try, so a
            # missing config file no longer crashes spider startup.
            with open(self._config_file(), 'r') as f:
                config = json.load(f)
            logging.info(f"爬虫:{self.name} 加载配置文件成功!")
        except (OSError, ValueError):
            # Missing or corrupt config file: start a fresh crawl.
            config = {}
            logging.error(f"爬虫:{self.name} 加载配置文件失败!")
        self.current_page = config.get('current_page', 1)
        self.max_page = config.get('max_page', 200)
        logging.info("启动配置信息:")
        logging.info(f"current_page={self.current_page}")
        logging.info(f'max_page={self.max_page}')
        self.config = config

    # Runs once, when the spider finishes.
    def closed(self, reason):
        """Save progress and release the database connection."""
        logging.info(f"爬虫:{self.name} 保存配置文件：")
        logging.info(f"current_page={self.current_page}")
        logging.info(f'max_page={self.max_page}')
        logging.info(f"failed_list={self.failed_list}")
        self.save_config()
        self.db.close()

    def start_requests(self):
        """Yield the request for the current page, if pages remain."""
        if self.current_page <= self.max_page:
            url = self.urls[0] % self.current_page
            yield scrapy.Request(url=url, headers=self.headers, meta={'page': self.current_page})
            logging.info(f"正在爬取： {self.current_page}/{self.max_page}页")

    def parse(self, response):
        """Extract one SpotItem per listing entry, then queue the next page."""
        if response.url == '':
            # BUGFIX: failed() is a generator and takes a reason argument;
            # it must be delegated to with `yield from`. The original plain
            # call raised TypeError (wrong arity) on every failure path.
            yield from self.failed(response, '请求发生错误')
            return

        li = response.css('.list_item.clrfix li')
        length = len(li)
        if length == 0:
            yield from self.failed(response, '没有景点数据')
            return
        lat = li.css('::attr(data-lat)').extract()
        lng = li.css('::attr(data-lng)').extract()
        url = li.css('.ct + a::attr(href)').extract()
        _txtbox = li.css('.ct .txtbox.clrfix')
        _titbox = li.css('.ct .titbox.clrfix')
        name = _titbox.css('a .cn_tit::text').extract()
        comment_num = _titbox.css('.countbox .comment_sum::text').extract()
        _countbox = _txtbox.css('.countbox')
        grade = _countbox.css('.total_star span::attr(style)').extract()

        def get_id(_url: str) -> int:
            """Parse the numeric spot id out of a detail-page URL."""
            span = [m.start() for m in re.finditer('-', _url)]
            # Slice between the first '-' (plus a 3-char prefix) and the
            # second '-'; half-open interval.
            spot_id = int(_url[span[0] + 3: span[1]])
            return spot_id

        for i in range(0, length):
            # Skip spots nobody has commented on.
            # BUGFIX: was `int(comment_num[i] == 0)` — a str-vs-int
            # comparison that is always False, so nothing was ever
            # skipped. Convert the count first, then compare.
            # (assumes the comment count is a plain digit string — TODO confirm)
            if int(comment_num[i]) == 0:
                continue
            item = SpotItem()
            try:
                item['url'] = url[i]
                item['cnname'] = name[i]
                item['lat'] = float(lat[i])
                item['lng'] = float(lng[i])
                item['id'] = get_id(url[i])
                # BUGFIX: grade[i] is a CSS style string, so the original
                # int(grade[i]) always raised and `grade` was never set.
                # Extract the first integer from the style; the star width
                # appears to encode score*10 — TODO confirm against site.
                m = re.search(r'\d+', grade[i]) if i < len(grade) else None
                if m:
                    _grade = int(m.group())
                    if _grade > 0:
                        item['grade'] = _grade / 10
            except Exception:
                # Any malformed entry aborts this page and records it
                # as failed (original best-effort behavior, kept).
                yield from self.failed(
                    response, f"景点数据错误, index={i}, \n url={url}, \nname={name}")
                return
            rank = _countbox.css('.ranking_sum')[i].css('.sum::text').get()
            if rank:
                item['rank'] = int(rank)
            yield item

        # Crawl the next page.
        self.save_config()
        self.current_page = self.current_page + 1
        yield from self.start_requests()

    def failed(self, response, reason=''):
        """
        Record the current page as failed and advance to the next page.

        Generator: yields the follow-up request, so callers must delegate
        with ``yield from``. ``reason`` is a new, backward-compatible
        parameter (defaulted) — the existing call sites already passed it.
        """
        page = self.current_page
        if page != 0:
            if reason:
                logging.error(reason)
            logging.error(f"第{page}出错,加入失败列表")
            self.failed_list.append(page)
            self.save_config()
        self.current_page = self.current_page + 1
        yield from self.start_requests()


if __name__ == '__main__':
    # Launch this spider through Scrapy's CLI; the crawl name is derived
    # from this file's name, which matches the spider's `name` attribute.
    from scrapy import cmdline
    from os import path
    spider_name = path.splitext(path.basename(__file__))[0]
    cmdline.execute(['scrapy', 'crawl', spider_name])
