import json
import logging

import pymysql as mysql
import scrapy
from scrapy.selector import Selector

import crawler.db as db
import crawler.ippool as ippool
from crawler.items import *


class CommentSpider(scrapy.Spider):
    """Crawl POI comments from place.qyer.com.

    The spider resumes from a position stored in a JSON config file,
    pulls the next spot without comments from the database view
    `spot_no_comment`, then walks every comment page of that spot,
    yielding ``TouristItem`` / ``CommentItem`` objects and a
    ``CommentFinishedItem`` marker when a spot is done (or failed).
    """

    name = 'CommentSpider'
    allowed_domains = ['place.qyer.com']
    # url = 'https://place.qyer.com/poi/V2UJYVFvBzNTZFI6/'
    headers = {
        'referer': 'https://place.qyer.com/shanghai/alltravel/'
    }
    custom_settings = {
        'ITEM_PIPELINES': {'crawler.pipelines.CommentPipeline': 300}
    }

    # Spot currently being crawled: display name, DB id and total comment pages.
    spot = {
        'name': '',
        'id': 0,
        'max_page': 0
    }
    # 1-based offset of the next row to read from `spot_no_comment`.
    start = 1
    # Page currently being crawled for the active spot.
    current_page = 1

    def __init__(self, *args, **kwargs):
        # FIX: call the base initializer so scrapy finishes spider setup;
        # extra args are forwarded, which stays compatible with the old
        # no-argument construction.
        super().__init__(*args, **kwargs)
        self.config = {}
        self.opened()

    def save_config(self):
        """Persist the crawl position so the spider can resume later."""
        self.config['start'] = self.start
        with open(f'spiders/config/{self.name}.json', 'w') as f:
            json.dump(self.config, f, indent=4, separators=(',', ': '))

    def opened(self):
        """Run once when the spider starts: load config, prime the DB."""
        config = {}
        try:
            # FIX: `open` moved inside the try and the bare `except:`
            # narrowed — a missing or corrupt config file now falls back
            # to defaults instead of crashing or hiding unrelated errors.
            with open(f'spiders/config/{self.name}.json', 'r') as f:
                config = json.load(f)
            logging.info(f"爬虫:{self.name} 加载配置文件成功!")
        except (OSError, json.JSONDecodeError):
            logging.error(f"爬虫:{self.name} 加载配置文件失败!")
        self.start = config.get('start', 1)
        logging.info("启动配置信息:")
        logging.info(f"start={self.start}")
        self.config = config

        # Prepare the database view used to pick uncrawled spots.
        self.init_db()

    def closed(self, reason):
        """Run once when the spider stops: release the proxy pool, save state."""
        ippool.close()
        self.save_config()

    def get_url(self, page=None):
        """Build the comment-API url for *page* (default: the current page).

        FIX: the original used a backslash continuation inside the
        f-string, which embedded the next line's indentation whitespace
        into the generated URL; adjacent literals keep it clean.
        """
        page = page or self.current_page
        return (
            "https://place.qyer.com/poi.php?"
            f"action=comment&page={page}&order=1"
            f"&poiid={self.spot['id']}&starLevel=all"
        )

    def init_db(self):
        """Create the `spot_no_comment` view (if absent) and log progress."""
        sql = "create view `spot_no_comment` as select `spot_id`, `cnname` from `tour`.`spot` where `spot`.`comment` =0;"
        try:
            db.cursor.execute(sql)
        except mysql.err.OperationalError as e:
            # MySQL error 1050 = "table already exists" — benign on reruns;
            # anything else is a real failure.
            if e.args[0] != 1050:
                raise
        # Count the remaining uncrawled spots.
        sql = "select count(*) from `tour`.`spot_no_comment`"
        db.cursor.execute(sql)
        num = db.cursor.fetchone()[0]
        logging.info(f"剩余带爬取景点数: {num}")

    def next_spot(self):
        """Fetch the next uncrawled spot row, wrapping around at the end.

        :returns: the row tuple ``(spot_id, cnname)``, or None when the
                  view is empty.
        """
        sql = f"select * from `spot_no_comment` limit {self.start-1}, 1;"
        db.cursor.execute(sql)
        rows = db.cursor.fetchall()
        if not rows:
            if self.start == 1:
                # Even position 0 is empty: nothing left to crawl.
                logging.info(f'没有数据')
                return None
            # Walked past the last row: wrap around and retry from the top.
            logging.info(f'数据库进行循环查询，start={self.start}')
            self.start = 1
            return self.next_spot()
        # FIX: dropped the dead try/except around rows[0] — the empty case
        # is already handled above, so indexing cannot fail here.
        self.start += len(rows)
        return rows[0]

    def start_requests(self):
        """Request the first comment page of the next uncrawled spot."""
        self.spot['max_page'] = 0
        self.current_page = 1
        spot = self.next_spot()
        if not spot:
            return
        self.spot['id'] = spot[0]
        self.spot['name'] = spot[1]
        # FIX: log before yielding — statements after a yield only run when
        # the generator is resumed, so the original logged at the wrong time.
        logging.info(f"正在爬取:start={self.start - 1}, name={self.spot['name']}, spot_id={self.spot['id']}")
        yield scrapy.Request(url=self.get_url(), headers=self.headers)

    def parse_page(self, data) -> int:
        """Extract the total number of comment pages.

        :param data: decoded json payload; ``data['pagehtml']`` holds the
                     pagination html when more than one page exists.
        :returns: the page count (always >= 1), or None on a parse failure.
        """
        page = 1
        if 'pagehtml' in data:
            # FIX: pre-bind html/a so the error log below cannot raise
            # NameError when Selector() itself fails.
            html = a = None
            try:
                html = Selector(text=data['pagehtml'])
                a = html.css('a')
                if len(a) >= 3:
                    # The second-to-last anchor carries the last page number.
                    page = int(a[-2].css('::attr(data-page)').get())
            except Exception as e:
                logging.error(f"查找最大页数错误,spot_name={self.spot['name']}, spot_id={self.spot['id']}\n \
                               html={html} \n a={a} \nerror={repr(e)}")
                return None
        if page < 1:
            page = 1
        # FIX: the original logged (and set spot['max_page']) *after*
        # `return page`, so that code was unreachable; log before returning.
        # The caller assigns spot['max_page'] itself.
        logging.info(f"获得最大页数,spot_name={self.spot['name']}, max_page={page}")
        return page

    def parse_comment(self, data: dict):
        """Yield a TouristItem and CommentItem per comment, then either
        advance to the next page or finish the spot and move on."""
        comments = data.get('lists', [])
        for each in comments:
            user_info = each['userinfo']
            # The user id is the last non-empty path segment of the profile
            # link (links may end with a trailing slash).
            group = user_info['link'].split('/')
            user_id = int(group[-1] or group[-2])

            user = TouristItem()
            user['name'] = user_info['name']
            user['id'] = user_id
            yield user

            comment = CommentItem()
            comment['spot_id'] = self.spot['id']
            comment['id'] = each['id']
            comment['star_level'] = each['starlevel']
            comment['date'] = each['date']
            comment['useful'] = each.get('useful', 0)
            comment['content'] = each.get('content', '')
            comment['author_id'] = user_id
            yield comment

        if self.spot['max_page'] == 0:
            yield from self.failed("页码错误")
            return
        logging.info(f"当前景点进度: {self.current_page}/{self.spot['max_page']} 页")
        if self.current_page >= self.spot['max_page']:
            # Current spot fully crawled: emit the finished marker, persist
            # the resume position, then start on the next spot.
            item = CommentFinishedItem()
            item['finished'] = 1
            item['spot_id'] = self.spot['id']
            logging.info(f"当前景点评论爬取完成,spot_name={self.spot['name']}")
            yield item
            self.save_config()
            yield from self.start_requests()
        else:
            self.current_page += 1

    def parse(self, response):
        """Decode the json response and dispatch to comment parsing."""
        if response.url == '':
            yield from self.failed("请求错误")
            return
        try:
            obj = json.loads(response.text)
        except Exception as e:
            yield from self.failed(f"json解析出错！, url={response.url}\n error={repr(e)}")
            return

        if obj['error_code'] != 0:
            yield from self.failed(f"请求到失败的数据, \n \
            error_code={obj['error_code']}, msg={obj.get('result', 'error')}, url={response.url}")
            return

        data = obj['data']
        if self.spot['max_page'] < 1:
            # First page of a new spot: learn the page count and enqueue
            # requests for the remaining pages.
            page = self.parse_page(data)
            if not page:
                yield from self.failed("最大页码错误")
                return
            self.spot['max_page'] = page
            logging.info(f"max_page={page}")
            for i in range(2, page + 1):
                yield scrapy.Request(url=self.get_url(i), headers=self.headers)
        yield from self.parse_comment(data)

    def failed(self, msg: str):
        """Report a failed spot and skip straight to the next one."""
        item = CommentFinishedItem()
        item['finished'] = 0
        item['spot_id'] = self.spot['id']
        logging.info(f"景点评论爬取失败,spot_name={self.spot['name']}, spot_id={self.spot['id']}, msg={msg}")
        yield item
        yield from self.start_requests()