import json
import logging
import re
from os import path
from typing import Optional

import pymysql as mysql
import scrapy
from scrapy.selector import Selector

import crawler.ippool as ippool
from crawler.items import *


class Spot:
    """A scenic spot whose comment pages are being crawled.

    Tracks crawl progress for one spot: total comment pages, pages
    already finished, pages that failed and must be retried, and a
    per-page completion flag list.
    """

    def __init__(self, spot_id: int = 0, name: str = ''):
        # Spot id (primary key in the `spot` table).
        self.spot_id: int = spot_id
        # Spot display name.
        self.name: str = name
        # Maximum number of comment pages (0 = not yet known).
        self.max_page: int = 0
        # Number of pages crawled successfully so far.
        self.finished_page: int = 0
        # Page numbers whose request failed and will be retried.
        # BUG FIX: these were class attributes, so the list (and the
        # counters below) were shared by every Spot instance.
        self.failed_page: list = []
        # Consecutive times the retry queue itself has failed.
        self.failed_times: int = 0
        # Per-page completion flags, index = page number - 1.
        self.page_status: list = [False]

    def __str__(self):
        return "{%d, %s}" % (self.spot_id, self.name)

    def get_comment_url(self, page: int = None) -> str:
        """Build the comment-API URL for *page*.

        Defaults to the page after the last finished one.
        """
        page = page or self.finished_page + 1
        # BUG FIX: the original continued the f-string with a backslash,
        # which embedded a run of literal spaces inside the query string.
        # Adjacent literals keep the URL intact.
        url = (f"https://travel.qunar.com/place/api/html/comments/poi/{self.spot_id}?"
               f"poiList=true&sortField=0&rank=0&pageSize=10&page={page}")
        return url

    def set_max_page(self, max_page: int):
        """Record the total page count and reset all per-page flags."""
        self.page_status = [False] * max_page
        self.max_page = max_page


class QunarCommentSpider(scrapy.Spider):
    """Crawl user comments for scenic spots from travel.qunar.com.

    Spots that still lack comments are read from a MySQL view
    (`spot_no_comment`); each spot's comment pages are fetched through
    the qunar comment API and parsed into items for CommentPipeline.
    """

    name = 'QunarCommentSpider'
    # NOTE(review): requests actually target travel.qunar.com — the
    # original list only held place.qyer.com, so qunar.com is added to
    # keep the offsite middleware from ever dropping retried requests.
    allowed_domains = ['place.qyer.com', 'qunar.com']
    # url = 'https://place.qyer.com/poi/V2UJYVFvBzNTZFI6/'
    headers = {
        'referer': 'https://place.qyer.com/shanghai/alltravel/'
    }
    custom_settings = {
        'ITEM_PIPELINES': {'crawler.pipelines.CommentPipeline': 300}
    }

    def __init__(self):
        # BUG FIX: the base Spider initializer was never called.
        super().__init__()
        import crawler.db as db
        self.db = db.connect('tour_qunar')
        # 1-based offset of the next row to read from the view
        # (was a shared class attribute).
        self.start: int = 1
        # Loaded configuration; load_config() replaces None with a dict.
        self.config: dict = None
        self.load_config()
        # Set up the database view used to enumerate spots.
        self.init_db()

    def save_config(self):
        """Persist self.config as config/<spider name>.json."""
        with open(f'{path.dirname(__file__)}/config/{self.name}.json', 'w') as f:
            json.dump(self.config, f, indent=4, separators=(',', ': '))

    def load_config(self):
        """Load self.config from config/<spider name>.json.

        Falls back to an empty dict when the file is missing or holds
        invalid JSON.
        """
        try:
            with open(f'{path.dirname(__file__)}/config/{self.name}.json', 'r') as f:
                text = f.read()
                # (redundant f.close() removed — the with-block closes it)
            try:
                config = json.loads(text)
                logging.info(f"爬虫:{self.name} 加载配置文件成功!")
            except Exception:
                config = {}
                logging.error(f"爬虫:{self.name} 加载配置文件失败!")
        except FileNotFoundError:
            config = {}
        self.config = config

    def closed(self, reason):
        """Scrapy hook: runs once when the spider shuts down."""
        ippool.close()
        logging.info(f"{self.name}关闭, reason={reason}")

    def init_db(self):
        """Create the `spot_no_comment` view and log how many spots remain."""
        db = self.db
        sql = "create view `spot_no_comment` as \
        select `spot_id`, `cnname` from `spot` where `spot`.`comment` =0;"
        try:
            db.cursor.execute(sql)
        except mysql.err.OperationalError as e:
            # 1050 = "table already exists"; anything else is fatal.
            if e.args[0] != 1050:
                raise
        sql = f"select count(*) from `{db.name}`.`spot_no_comment`"
        db.cursor.execute(sql)
        num = db.cursor.fetchone()[0]
        logging.info(f"剩余带爬取景点数: {num}")

    def next_spot(self) -> Optional[Spot]:
        """Fetch the next spot without comments.

        Cycles back to the first row when the end of the view is
        reached; returns None only when the view is completely empty.
        (The original annotation `Spot or None` evaluated to just Spot.)
        """
        db = self.db
        size = 1
        sql = f"select * from `spot_no_comment` limit {self.start-1}, {size};"
        db.cursor.execute(sql)
        rows = db.cursor.fetchall()
        length = len(rows)
        if length == 0:
            if self.start == 1:
                logging.info(f'没有数据')
                return None
            else:
                # Wrap around and query from the beginning again.
                logging.info(f'数据库进行循环查询，start={self.start}')
                self.start = 1
                return self.next_spot()
        row = rows[0]
        spot = Spot(row[0], row[1])
        self.start = self.start + length
        return spot

    def start_requests(self):
        """Kick off (or continue with) the next spot's first comment page."""
        spot = self.next_spot()
        if not spot:
            return
        spot.finished_page = 0
        page = 1
        yield scrapy.Request(url=spot.get_comment_url(page), headers=self.headers,
                             meta={'page': page, 'spot': spot}, dont_filter=True)
        logging.info(f"正在爬取: start={self.start - 1}, spot={spot}")

    @staticmethod
    def parse_page(data) -> int:
        """Extract the total number of comment pages from an HTML fragment.

        :param data: HTML text of a comment-list page
        :return: total page count (always >= 1)
        """
        html = Selector(text=data)
        a = html.css('a.page')
        if len(a) == 0:
            return 1
        # Multiple pages: the second-to-last pager link holds the count.
        try:
            a = a[-2]
            page = a.css('::text').get()
            page = int(page)
        except (IndexError, TypeError, ValueError):
            page = 1
        if page < 1:
            page = 1
        return page

    def parse_comment(self, request, data: dict):
        """Parse one comment-list page (HTML fragment from the API).

        Yields TouristItem and CommentItem objects. Comments whose full
        text lives on a separate page are fetched via follow-up
        requests handled by parse_comment_more.
        """
        html = Selector(text=data)
        tag_item = html.css('ul#comment_box>li')
        length = len(tag_item)
        tag_user = tag_item.css('.e_comment_usr_name')

        tag_content = tag_item.css('.e_comment_content')
        tag_title = tag_item.css('.e_comment_title')
        comment_url = tag_title.css('a[data-beacon=comment_title]::attr(href)').extract()
        comments = tag_content.css('p.first')
        tag_info = tag_item.css('.e_comment_add_info')
        # BUG FIX: the CSS pseudo-class is spelled :first-child; the
        # original :first_child is invalid and made the selector fail.
        date = tag_info.css('ul>li:first-child::text').extract()
        star = tag_item.css('.total_star>span::attr(class)').extract()  # ["cur_star star_5", ]

        spot: Spot = request.meta['spot']
        more_comments = []  # "see more" comments needing a follow-up request
        for i in range(0, length):
            # BUG FIX: initialized up front so the error message below
            # can never reference an unbound (or previous-iteration) id.
            user_id = None
            # --- parse user ---
            href = tag_user[i].css('a::attr(href)').get()
            if href == 'javascript:;':
                # ctrip user, skip
                continue
            if href is not None:
                try:
                    # "https://travel.qunar.com/space/386170362@qunar"
                    user_id = href.split('/')[-1][0:-6]
                    user_id = int(user_id)
                except Exception as e:
                    yield from self.failed(request, f"解析用户ID出错, index={i}, user_id={user_id}, \n error={repr(e)}")
                    return
                user = TouristItem()
                user['id'] = user_id
                user['name'] = tag_user[i].css('a::text').get()
                yield user
            # --- parse comment ---
            href = comment_url[i]
            if href == 'javascript:;':
                continue
            try:
                comment_id = href[re.search('-', href).start() + 3: -1]
                comment_id = int(comment_id)
            except Exception as e:
                # Comment id unparsable: give up on the whole page.
                yield from self.failed(request, f"解析评论ID出错, index={i}, user_id={user_id}, \nerror={repr(e)}")
                return
            comment = CommentItem()
            comment['spot_id'] = spot.spot_id
            comment['id'] = comment_id
            comment['date'] = date[i]
            try:
                # class like "cur_star star_5" -> last char is the rating
                comment['star_level'] = float(star[i][-1])
            except Exception:
                pass
            if user_id:
                comment['author_id'] = user_id
            # Number of photos attached to the comment.
            img_box = tag_item[i].css('.e_comment_imgs_box')
            if len(img_box) != 0:
                count = img_box.css('span.img_count>a::text')
                if len(count) != 0:
                    count = count.get()[1:-2]
                    try:
                        count = int(count)
                    except (TypeError, ValueError):
                        count = 1
                else:
                    count = img_box.css('ul li')
                    count = len(count)
                    if count == 0:
                        count = 1
                comment['img_num'] = count
            # "useful" (upvote) count.
            tag_help = tag_info[i].css('span.tit')
            if len(tag_help) != 0:
                count = tag_help.css('i::text')
                if len(count) != 0:
                    count = count.get()
                    try:
                        count = int(count)
                    except (TypeError, ValueError):
                        count = 1
                else:
                    count = 1
                comment['useful'] = count
            more_href = tag_content[i].css('a.seeMore::attr(href)').get()
            if more_href is not None:
                # Full text lives on a separate page; crawl it later.
                more_comments.append({'url': more_href, 'item': comment})
            else:
                comment['content'] = ''.join(comments[i].css(' ::text').extract())
                yield comment

        page = request.meta['page']
        if len(more_comments) == 0:
            spot.finished_page = spot.finished_page + 1
            spot.page_status[page - 1] = True
            logging.info(f"spot={spot}, {spot.finished_page}/{spot.max_page}")
            yield from self.finish_page(spot)
        else:
            # Shared countdown: the page is done once every "see more"
            # request has come back.
            index = {
                'remained_num': len(more_comments)
            }
            for each in more_comments:
                yield scrapy.Request(url=each['url'], headers=self.headers, meta={
                    'index': index,
                    'comment': each,
                    'page': page,
                    'spot': spot
                }, priority=255, callback=self.parse_comment_more, dont_filter=True)

    def parse_comment_more(self, response):
        """Parse a comment's detail page to recover its full text."""
        request: scrapy.Request = response.request
        meta = request.meta
        comment = meta['comment']
        if 'exception' in meta:
            yield from self.failed(request, "请求发生错误", comment['url'])
            return

        spot: Spot = meta['spot']
        page = meta['page']
        # Page already finished via another path: nothing to do.
        if spot.page_status[page-1]:
            return
        comment_box = response.css('div.b_comment_detail')
        content = comment_box.css('.comment_content p::text').extract()
        item: CommentItem = comment['item']
        item['content'] = '\n\r'.join(content)
        yield item

        remained_num = meta['index']['remained_num'] - 1
        meta['index']['remained_num'] = remained_num
        if remained_num == 0:
            spot.finished_page = spot.finished_page + 1
            logging.info(f"spot={spot}, {spot.finished_page}/{spot.max_page} [more]")
            page = meta['page']
            spot.page_status[page-1] = True
            yield from self.finish_page(spot)

    def parse(self, response):
        """Decode the API's JSON envelope and dispatch to parse_comment."""
        request = response.request
        meta = request.meta
        if 'exception' in request.meta:
            yield from self.failed(request, "请求发生错误")
            return
        spot: Spot = meta['spot']
        page = meta['page']
        if spot.page_status[page-1]:
            return
        try:
            obj = response.json()
        except Exception:
            yield from self.failed(request, f"json解析出错")
            return
        error_code = obj.get('errcode')
        if error_code != 0:
            if error_code == 200:
                # "parameter error" before max_page is known means the
                # spot has no crawlable comments at all.
                if spot.max_page < 1:
                    logging.warning(f"参数错误的请求，已经放弃爬取该景点, spot={spot}")
                    yield from self.finish_spot(spot)
                    yield from self.start_requests()
                    return
            yield from self.failed(request, f"请求到失败的数据,\n error_code={error_code}, \
            msg={obj.get('errmsg', '未知错误')}, url={response.url}")
            return

        data = obj['data']
        if spot.max_page < 1:
            page = self.parse_page(data)
            if not page:
                # BUG FIX: failed() is a generator — the original call
                # discarded it unconsumed (so nothing happened) and the
                # message was missing its f prefix.
                yield from self.failed(request, f"最大页码错误, spot={spot}")
                yield from self.start_requests()
                return
            spot.set_max_page(page)
            logging.info(f"max_page={page}, spot={spot}")
            # Generate requests for the remaining pages.
            for i in range(2, page + 1):
                yield scrapy.Request(url=spot.get_comment_url(i), headers=self.headers,
                                     meta={'page': i, 'spot': spot}, dont_filter=True)
        yield from self.parse_comment(request, data)

    def failed(self, request, msg: str = '', url=None):
        """Record a failed page for later retry.

        :raises ValueError: when the request carries no page meta.
        """
        url = url or request.url
        meta = request.meta
        page = meta.get('page')
        spot: Spot = meta['spot']
        if page:
            logging.error(f"景点评论爬取失败,spot={spot}, page={page}, msg={msg}")
            spot.failed_page.append(page)
        else:
            raise ValueError(f"page is None, url={url}, \n meta={meta}")
        yield from self.finish_page(spot)

    def finish_page(self, spot: Spot):
        """Decide what to do once all of a spot's pages are accounted for.

        Retries failed pages up to 4 rounds, then either gives up on the
        spot or marks it finished and moves on to the next one.
        """
        length = len(spot.failed_page)
        if spot.finished_page + length >= spot.max_page:
            if spot.max_page < 1:
                return
            if length > 0:
                logging.info(f"请求错误列表面页, length={length}, spot={spot}")
                spot.failed_times = spot.failed_times + 1
                if spot.failed_times == 4:
                    # Too many retry rounds: abandon and move on.
                    yield from self.finish_spot(spot)
                    yield from self.start_requests()
                    return
                if spot.failed_times < 4:
                    # Re-issue requests for every failed page.
                    for i in spot.failed_page:
                        yield scrapy.Request(url=spot.get_comment_url(i), headers=self.headers,
                                             meta={'page': i, 'spot': spot}, dont_filter=True)
                    spot.failed_page.clear()
            else:
                # Spot finished cleanly; move on to the next one.
                yield from self.finish_spot(spot)
                # self.save_config()
                yield from self.start_requests()

    @staticmethod
    def finish_spot(spot: Spot):
        """Yield the item that marks a spot's comments as fully crawled."""
        item = CommentFinishedItem()
        item['finished'] = 1
        item['spot_id'] = spot.spot_id
        logging.info(f"评论爬取完成,spot={spot}")
        yield item


if __name__ == '__main__':
    # Run this spider directly: equivalent to `scrapy crawl <module name>`.
    from os import path
    from scrapy import cmdline
    spider = path.basename(__file__).split('.')[0]
    cmdline.execute(['scrapy', 'crawl', spider])
