import json
import logging
import re
import scrapy
from crawler.items import*
import crawler.db as db


class CTripSpotSpider(scrapy.Spider):
    """Spider that crawls the Shanghai sight listing pages on you.ctrip.com.

    Crawl progress (``page_pointer``) and the list of failed pages are
    persisted to ``config/<spider name>.json`` next to this module, so an
    interrupted crawl can resume where it left off.
    """

    name = 'CTripSpotSpider'
    allowed_domains = ['you.ctrip.com']
    # Listing URL template; %d is the 1-based page number.
    url = 'https://you.ctrip.com/sight/shanghai2/s0-p%d.html#sightname'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
        'referer': 'https://you.ctrip.com/place/'
    }
    page_pointer = 1  # pointer to the page currently being crawled
    max_page = 2  # last page to crawl (inclusive; overridden from config)

    default_group_size = 1  # default number of pages fetched per group
    group_size = 1  # current number of pages fetched per group
    current_group_size = 0  # pages already fetched in the current group

    # Loaded/persisted configuration dict (see opened()/save_config()).
    config = {}
    custom_settings = {
        'ITEM_PIPELINES': {'crawler.pipelines.SpotPipeline': 300}
    }

    page_queue: list = None  # queue of pages still to crawl (currently unused)
    failed_list = []  # pages whose crawl failed (re-bound per instance in __init__)

    def __init__(self, *args, **kwargs):
        # Forward scrapy's constructor arguments so Spider.__init__ runs.
        super().__init__(*args, **kwargs)
        # Re-bind per instance: the class-level list would be shared across
        # all instances and leak failed pages between spider runs.
        self.failed_list = []
        self.db = db.connect('tour_ctrip')
        self.opened()
        # Regexes are pre-compiled once; parse() applies them per item.
        self.pattern_comment_num = re.compile(r'\d+')
        self.pattern_spot_id = re.compile(r'(\d+)\.html')

    def save_config(self):
        """Persist current crawl progress to ``config/<name>.json``."""
        self.config['page_pointer'] = self.page_pointer
        self.config['failed_list'] = self.failed_list
        from os import path
        with open(f'{path.dirname(__file__)}/config/{self.name}.json', 'w') as f:
            json.dump(self.config, f, indent=4, separators=(',', ': '))

    # Runs exactly once, when the spider starts working.
    def opened(self):
        """Load persisted crawl state, falling back to defaults on any error.

        A missing or unreadable config file is treated the same way as a
        malformed one: the spider starts from the built-in defaults.
        """
        from os import path
        try:
            # Reading inside the try: the original crashed on a missing
            # config file because only json.loads was guarded.
            with open(f'{path.dirname(__file__)}/config/{self.name}.json', 'r') as f:
                text = f.read()
            config = json.loads(text)
            logging.info(f"爬虫:{self.name} 加载配置文件成功!")
        except (OSError, json.JSONDecodeError):
            config = {}
            logging.error(f"爬虫:{self.name} 加载配置文件失败!")
        self.page_pointer = config.get('page_pointer', 1)
        self.max_page = config.get('max_page', 200)
        self.default_group_size = config.get('default_group_size', 1)
        logging.info("启动配置信息:")
        logging.info(f"page_pointer={self.page_pointer}")
        logging.info(f'max_page={self.max_page}')
        logging.info(f'default_group_size={self.default_group_size}')
        self.config = config

    # Runs exactly once, when the spider finishes.
    def closed(self, reason):
        """Save crawl state and release the database connection."""
        logging.info(f"爬虫:{self.name} 保存配置文件：")
        logging.info(f"page_pointer={self.page_pointer}")
        logging.info(f'max_page={self.max_page}')
        logging.info(f"failed_list={self.failed_list}")
        self.save_config()
        self.db.close()

    def start_requests(self):
        """Seed the crawl with up to ``default_group_size`` page requests."""
        if self.page_pointer <= self.max_page:
            for i in range(self.page_pointer, self.page_pointer + self.default_group_size):
                if i > self.max_page:
                    break
                else:
                    yield from self.next_request()

    def next_request(self):
        """Yield the request for the next listing page, if any remain.

        NOTE(review): the pointer is advanced *before* the URL is built, so
        the page at the initial ``page_pointer`` value is never requested
        (e.g. a fresh crawl starts at page 2) — confirm whether
        ``page_pointer`` is meant to be "last completed page".
        """
        if self.page_pointer >= self.max_page:
            return
        else:
            self.page_pointer = self.page_pointer + 1
        url = self.url % self.page_pointer
        yield scrapy.Request(url=url, headers=self.headers, dont_filter=True, meta={'page': self.page_pointer})
        logging.info(f"正在爬取： {self.page_pointer}/{self.max_page}页")

    def parse(self, response):
        """Extract SpotItems from one listing page, then schedule the next one.

        Skips spots with fewer than 3 comments; records the page as failed
        when the request errored or the page contains no spot markup.
        """
        request: scrapy.Request = response.request
        meta = request.meta
        if 'exception' in meta:
            yield from self.failed(request, "请求发生错误")
            return

        item_tag = response.css('div.list_wide_mod2>div.list_mod2')
        length = len(item_tag)
        if length == 0:
            # failed() is a generator: without `yield from` the original
            # call was a no-op and the crawl stalled on an empty page.
            yield from self.failed(request, '没有景点数据')
            return

        # Parallel lists extracted per spot; assumed to be index-aligned
        # with item_tag — TODO confirm the page markup guarantees this.
        data_tag = item_tag.css('dl>dt')
        comment_tag = item_tag.css('ul.r_comment')
        name = data_tag.css('a::text').extract()
        href = data_tag.css('a::attr(href)').extract()
        comment_num = comment_tag.css('li>a.recomment::text').extract()

        for i in range(0, length):
            res = self.pattern_comment_num.search(comment_num[i])
            if res is not None:
                try:
                    num = int(res.group())
                except ValueError:
                    num = 0
            else:
                num = 0
            if num < 3:
                logging.info(f"{name[i]}, 评论数量太少，跳过")
                continue
            try:
                spot_id = self.pattern_spot_id.search(href[i])
                spot_id = int(spot_id.group(1))
            except (ValueError, AttributeError) as e:
                # AttributeError covers search() returning None (no match);
                # the original caught only ValueError and crashed on it.
                yield from self.failed(request, f"{name[i]}, spot_id 解析错误，href={href[i]}\nerror={repr(e)}")
                return

            item = SpotItem()
            item['id'] = spot_id
            item['cnname'] = name[i]
            try:
                # Rank is rendered as e.g. "(12)"; strip the brackets.
                rank = data_tag[i].css('s.g_background::text').get()
                if rank is not None:
                    rank = int(rank[1:-1])
                    item['rank'] = rank
            except ValueError:
                pass  # best-effort field: leave rank unset on bad markup
            try:
                grade = comment_tag[i].css('a.score>strong::text').get()
                if grade is not None:
                    grade = float(grade)
                    item['grade'] = grade
            except ValueError:
                pass  # best-effort field: leave grade unset on bad markup
            yield item

        # Persist progress, then crawl the next page.
        self.save_config()
        yield from self.next_request()

    def failed(self, request, msg):
        """Record a failed page in the failure list, then schedule the next page."""
        meta = request.meta
        page = meta.get('page', 0)
        logging.error(f"爬取面页失败, page={page}, msg={msg}")
        if page != 0:
            logging.error(f"第{page}出错,加入失败列表")
            self.failed_list.append(page)
            self.save_config()
        yield from self.next_request()

if __name__ == '__main__':
    # Running this module directly is equivalent to `scrapy crawl <spider>`.
    from os import path
    from scrapy import cmdline

    spider_name = path.basename(__file__).split('.')[0]
    cmdline.execute(['scrapy', 'crawl', spider_name])