import logging
import json
import re
import scrapy
from scrapy.selector import Selector
from scrapy import signals
import time
import pymysql as mysql
from crawler.items import *
from os import path

# Shared HTTP headers for the Ctrip comment API POSTs: the origin/referer
# mimic the site itself so the JSON endpoint accepts the request.
_headers = {
    'content-type': 'application/json',
    'origin': 'https://you.ctrip.com',
    'referer': 'https://you.ctrip.com',
}


class Spot:
    """Crawl state for one scenic spot whose comment pages are being fetched."""

    # Total number of comment pages (set via set_max_page / from_dict).
    max_page: int = 0
    # Per-page completion flags; page_status[i] is True once page i+1 is done.
    page_status: list = None

    def __init__(self, spot_id=0, name=''):
        self.spot_id = spot_id  # spot ID
        self.name = name  # spot name
        self.poiId = 0  # Ctrip poiId used by the comment API
        self.finished_page_num = 0  # number of pages already completed
        self.page_pointer = 0  # scan cursor for get_next_page
        self.finished = False
        # BUG FIX: _item_list was never initialized anywhere, so the first
        # call to push_item raised AttributeError.
        self._item_list = []

    def push_item(self, page, item_list):
        """Queue the (page, item_list) pair crawled for this spot."""
        self._item_list.append((page, item_list))

    def __str__(self):
        return "{%d, %s}" % (self.spot_id, self.name)

    def to_dict(self):
        """Serialize breakpoint state for persisting to the config file."""
        return {
            'spot_id': self.spot_id,
            'poiId': self.poiId,
            'name': self.name,
            'max_page': self.max_page,
            'page_status': self.page_status
        }

    @staticmethod
    def from_dict(dic):
        """Restore a Spot from to_dict() output.

        Raises RuntimeError when the dict has no 'poiId' key; recounts the
        finished pages from page_status instead of trusting a stored counter.
        """
        spot = Spot(dic['spot_id'])
        spot.name = dic.get('name')
        spot.max_page = dic.get('max_page')
        spot.page_status = dic.get('page_status')
        if 'poiId' in dic:
            spot.poiId = dic.get('poiId')
        else:
            raise RuntimeError(f"spot={spot} ， has not poiId!")
        spot.finished_page_num = sum(
            1 for i in range(spot.max_page) if spot.page_status[i]
        )
        return spot

    def get_home_page_url(self):
        """URL of the spot's detail page, anchored at the comment section."""
        return f'https://you.ctrip.com/sight/shanghai2/{self.spot_id}.html#comment'

    def set_max_page(self, max_page: int):
        """Record the total page count and mark every page as unfinished."""
        self.page_status = [False] * max_page
        self.max_page = max_page

    def get_next_page(self):
        """Return the next unfinished page number (1-based).

        Scans circularly starting at the current pointer and advances the
        pointer past the returned page; returns 0 when every page is done.
        """
        start = self.page_pointer
        for i in range(start, self.max_page):
            self.page_pointer = i + 1
            if not self.page_status[i]:
                return i + 1
        # Wrap around and rescan the pages before the original pointer.
        self.page_pointer = 0
        for i in range(start):
            self.page_pointer = i + 1
            if not self.page_status[i]:
                return i + 1
        return 0


class CTripCommentSpider(scrapy.Spider):
    """Crawls user comments for Shanghai sights from Ctrip (you.ctrip.com).

    Flow: pick an uncrawled spot from the `spot_no_comment` DB view, fetch
    its detail page to scrape the poiId and comment-page count, then POST the
    JSON comment API one page at a time (up to `concurrent_page` pages in
    flight).  Progress is persisted to a per-spider JSON config file so an
    interrupted run can resume from the recorded breakpoint spot.
    """

    name = 'CTripCommentSpider'
    allowed_domains = ['you.ctrip.com']

    custom_settings = {
        'ITEM_PIPELINES': {'crawler.pipelines.CommentPipeline': 300},
    }

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Standard Scrapy factory; also wires `opened` to spider_opened."""
        spider = cls(*args, **kwargs)
        spider._set_crawler(crawler)
        crawler.signals.connect(spider.opened, signal=signals.spider_opened)
        return spider

    def __init__(self):
        self.db = None  # DB wrapper, created in opened()
        self.config: dict = None  # persisted config / breakpoint state
        self.start = 1  # 1-based offset of the next DB read
        self.breakpoint_spot: dict = None  # spot to resume from (breakpoint)
        self.concurrent_page = 4  # number of comment pages crawled concurrently
        self._pattern_poiId = re.compile(r'"poiCommentInfo":{"poiId":(\d+),')  # matches poiId (NOTE: get_profile compiles its own copy)
        # Guard flag: while True, next_requests() yields nothing, so only
        # one spot is being crawled at a time.
        self.mux = False

    def opened(self):
        """spider_opened handler: connect the DB, load config, init the view."""
        import crawler.db as db
        self.db = db.connect('tour_ctrip')
        self.config = self.load_config(self.name)
        self.concurrent_page = self.config.get('concurrent_page') or self.concurrent_page
        # Initialize database-side helpers (view + remaining-count log).
        self.init_db()

    def save_config(self, name=None):
        """Write self.config as pretty-printed JSON under config/ next to this module."""
        with open(f'{path.dirname(__file__)}/config/{name or self.name}.json', 'w') as f:
            json.dump(self.config, f, indent=4, separators=(',', ': '))

    @staticmethod
    def load_config(name) -> dict:
        """Load config/<name>.json; return {} when missing or unparseable."""
        try:
            with open(f'{path.dirname(__file__)}/config/{name}.json', 'r') as f:
                text = f.read()
                f.close()  # redundant inside `with`; kept as-is
            try:
                config = json.loads(text)
                logging.info(f"配置:{name} 加载配置文件成功!")
            except Exception:
                config = {}
                logging.error(f"配置:{name} 加载配置文件失败!")
        except FileNotFoundError:
            config = {}
        return config

    def closed(self, reason):
        """Scrapy shutdown hook: log why the spider stopped."""
        logging.info(f"{self.name}关闭, reason={reason}")

    def init_db(self):
        """Create the `spot_no_comment` view (if absent) and log how many
        spots still need their comments crawled."""
        db = self.db
        # View of spots whose comments have not been crawled yet.
        sql = "create view `spot_no_comment` as \
           select `spot_id`, `cnname` from `spot` where `spot`.`comment` =0;"
        try:
            db.cursor.execute(sql)
        except mysql.err.OperationalError as e:
            # 1050 = "table already exists"; anything else is re-raised.
            if e.args[0] != 1050:
                raise
        # Count the remaining records.
        sql = f"select count(*) from `spot_no_comment`"
        db.cursor.execute(sql)
        num = db.cursor.fetchone()[0]
        logging.info(f"剩余带爬取景点数: {num}")

    def next_spot(self) -> "Spot | None":
        """Fetch the next uncrawled spot from the view.

        Reads one row at offset self.start-1; when the offset runs past the
        end of the view, wraps back to the beginning and retries once.
        Returns None only when the view is completely empty.
        """
        db = self.db
        size = 1
        sql = f"select * from `spot_no_comment` limit {self.start-1}, {size};"
        db.cursor.execute(sql)
        rows = db.cursor.fetchall()
        length = len(rows)
        if length == 0:
            if self.start == 1:
                logging.info(f'没有数据')
                return None
            else:
                # Wrap around and query again from the start of the view.
                logging.info(f'数据库进行循环查询，start={self.start}')
                self.start = 1
                return self.next_spot()
        row = rows[0]
        spot = Spot(row[0], row[1])
        self.start = self.start + length
        return spot

    def start_requests(self):
        """Entry point: resume the saved breakpoint spot if one exists,
        otherwise begin with a fresh spot from the DB."""
        # Spot interrupted during the previous run, if any.
        # NOTE(review): finish_spot stores None under this key; from_dict
        # would then fail on the next start — confirm the key is removed or
        # overwritten before a restart.
        if 'breakpoint_spot' in self.config:
            spot = self.config['breakpoint_spot']
            spot = Spot.from_dict(spot)
            logging.info(f"断点续爬, spot={spot}")
            num = 0
            # Kick off up to `concurrent_page` unfinished pages.
            for i in range(0, self.concurrent_page):
                page = spot.get_next_page()
                if page > 0:
                    num = num + 1
                    yield from self.comment_request(spot, page)
                else:
                    break
            if num > 0:
                self.mux = True
                return

        yield from self.next_requests()
        self.mux = True

    def next_requests(self):
        """Yield a request for the next spot's home page, unless `mux`
        indicates a spot is already in progress."""
        if self.mux:
            return
        spot = self.next_spot()
        if not spot:
            return
        logging.info(f"正在爬取, spot={spot}")
        yield scrapy.Request(url=spot.get_home_page_url(), meta={'spot': spot}, dont_filter=True)

    def comment_request(self, spot: Spot, page: int):
        """Yield a POST to the Ctrip comment API for one page of `spot`."""
        data = {"arg": {
            "channelType": 2,
            "collapseType": 0,
            "commentTagId": 0,
            "pageIndex": page,
            "pageSize": 10,
            "poiId": spot.poiId,
            "sourceType": 1,
            "sortType": 3,
            "starType": 0
        }}
        url = "https://m.ctrip.com/restapi/soa2/13444/json/getCommentCollapseList"
        yield scrapy.Request(url, method="POST", body=json.dumps(data),
                             meta={'spot': spot, 'page':page},
                             headers=_headers, dont_filter=True,
                             callback=self.parse_json)

    def get_profile(self, response, spot: Spot):
        """Scrape poiId and the max comment-page count from the home page.

        Returns True on success; logs and returns False on any parse failure.
        """
        pattern = re.compile(r'"poiCommentInfo":{"poiId":(\d+),')
        res = pattern.search(response.text)
        if res is None:
            return False
        poi_id = res.group(1)
        try:
            poi_id = int(poi_id)
            spot.poiId = poi_id
        except Exception as e:
            logging.error(f"解析poiId错误, {repr(e)}")
            return False
        logging.info(f"poiId={poi_id}")
        # Pagination <li title="N"> entries; the last numeric title is the
        # highest page number.
        pagination = response.css("ul.ant-pagination>li.ant-pagination-item[title]::attr(title)").extract()
        if len(pagination) == 0:
            logging.error("最大面页解析错误")
            return False
        # Walk backwards so the first parsable title found is the max page.
        for i in range(len(pagination) - 1, -1, -1):
            try:
                page = int(pagination[i])
                spot.set_max_page(page)
                logging.info(f"max_page={page}")
                return True
            except ValueError:
                pass
        return False

    def parse_json(self, response):
        """Parse one page of the comment API: emit tourist/comment items,
        mark the page done, then schedule the next page — or, when all pages
        are done, finish the spot and move on to the next one."""
        request = response.request
        meta = request.meta
        spot: Spot = meta['spot']
        page = meta['page']
        # Skip a page that a concurrent request already completed.
        if spot.page_status[page-1]:
            return

        if 'exception' in meta:
            # A failure was flagged upstream in meta: retry this page.
            logging.error(f"请求发生错误,spot={spot}")
            yield from self.comment_request(spot, page)
            return
        data = response.json()

        if not ('code' in data and data['code'] == 200) or not ('result' in data and 'items' in data['result']):
            logging.error(f"response 错误, spot={spot}, msg={data.get('msg')}")
            yield from self.comment_request(spot, page)
            return
        comments = data['result']['items']
        if comments is None or len(comments) == 0:
            logging.error(f"comments 数量为0, spot={spot}, msg={data.get('msg')}")
            print(response.text)
            yield from self.comment_request(spot, page)
            return

        def to_date(s):
            """Convert a '/Date(1612249912000+0800)/' string to 'Y-m-d H:M:S'."""
            res = re.search(r'Date\((\d+)\+(\d*)\)', s)
            time_stamp = int(res.group(1)) / 1000
            return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_stamp))

        for each in comments:
            author_id = 0
            user = each.get('userInfo')
            if user is not None:
                item = TouristItem()
                item['name'] = user.get('userNick', '')
                author_id = user.get('userId', 0)
                item['id'] = author_id
                yield item
            item = CommentItem()
            item['id'] = each['commentId']
            item['spot_id'] = spot.spot_id
            item['author_id'] = author_id
            item['content'] = each['content']
            item['date'] = to_date(each['publishTime'])
            # Like ("useful") count
            if 'usefulCount' in each:
                item['useful'] = each['usefulCount']
            # Star rating
            if 'score' in each:
                item['star_level'] = each['score']
            # Number of attached photos
            if 'images' in each:
                item['img_num'] = len(each['images'])
            yield item
        if spot.page_status[page-1]:
            return
        spot.page_status[page-1] = True
        # Persist progress after every completed page for breakpoint resume.
        self.save_config(self.name)
        logging.info(f"{page}/{spot.max_page}")
        page = spot.get_next_page()
        if page > 0:
            yield from self.comment_request(spot, page)
        else:
            if spot.finished:
                return
            spot.finished = True
            # All pages done for this spot.
            yield from self.finish_spot(spot)
            time.sleep(0.1)
            # Move on to the next spot.
            self.mux = False
            yield from self.next_requests()

    def parse(self, response):
        """Parse a spot's home page: extract its profile, save it as the
        breakpoint spot, then launch the first batch of comment requests."""
        request = response.request
        meta = request.meta
        spot: Spot = meta.get('spot')
        if 'exception' in meta:
            logging.error(f"请求发生错误, 跳过, spot={spot}")
            # Skip to the next spot.
            self.mux = False
            yield from self.next_requests()
            return

        if not self.get_profile(response, spot):
            # Profile unparsable: skip to the next spot.
            logging.error(f"景点信息有错, 跳过,spot={spot}")
            self.mux = False
            yield from self.next_requests()
            return
        # Persist this spot as the resume point before crawling its pages.
        self.config['breakpoint_spot'] = spot.to_dict()
        self.save_config(self.name)
        num = 0
        # Launch up to `concurrent_page` comment-page requests.
        for i in range(0, spot.max_page):
            page = spot.get_next_page()
            if page > 0:
                yield from self.comment_request(spot, page)
                num = num + 1
                if num >= self.concurrent_page:
                    break
            else:
                break
        # Comment crawling starts from here.
        # from scrapy.shell import inspect_response
        # inspect_response(response, self)

    def finish_spot(self, spot: Spot):
        """Emit a CommentFinishedItem for a fully crawled spot and clear the
        in-memory breakpoint (not flushed to disk here)."""
        item = CommentFinishedItem()
        item['finished'] = 1
        item['spot_id'] = spot.spot_id
        yield item
        logging.info(f"评论爬取完成,spot={spot}")
        self.config['breakpoint_spot'] = None



if __name__ == '__main__':
    # Allow running this spider directly: hand the scrapy CLI a crawl
    # command using this module's filename (minus extension) as spider name.
    from scrapy import cmdline
    from os import path

    command = f"scrapy crawl {path.basename(__file__).split('.')[0]}"
    cmdline.execute(command.split())
