# -*- coding: utf-8 -*-
# -----------------------------------------
# @Time       : 2020/9/29 15:50
# @Author     : dqz 
# @Email      : 1513727379@qq.com
# @File       : binglog2sql.py
# @Software   : PyCharm
# -----------------------------------------
from datetime import datetime

import pymysql
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.event import QueryEvent, RotateEvent, FormatDescriptionEvent, StopEvent, XidEvent, GtidEvent, \
    BeginLoadQueryEvent, ExecuteLoadQueryEvent, HeartbeatLogEvent, NotImplementedEvent
from pymysqlreplication.row_event import DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent, TableMapEvent
from binlog2sql.config import REPLICATION_SETTINGS
from binlog2sql.utils import fix_object, fix_NULL, temp_open, filename


class Binlog2sql(object):
    """Parse MySQL binlog events into (optionally reversible) SQL statements.

    Positioning rules:
    1. With no start time / position / file given, start from the file and
       position the master is currently writing.
    2. Any boundary that IS given is honoured; missing ones are ignored.
    3. auto_position: derive the position from GTID.
    Windows: start_time->stop_time, start_pos->end_pos, start_file->end_file.
    """

    def __init__(self, conn: dict = None, primary_key: bool = True, start_file: str = None, end_file: str = None,
                 rollback: bool = False, start_time: str = None, stop_time: str = None, only_events: list = None,
                 only_schemas: list = None, auto_position: bool = None, ignored_schemas: list = None,
                 only_dml: bool = True, only_tables: list = None, start_pos: int = 4, end_pos: int = None,
                 auto_stop=False, blocking=False, output_update_diff=False):
        """
        :param conn:            connection settings: {"host": host, "port": port, "user": user, "passwd": passwd}
        :param primary_key:     whether generated statements include the primary key column
        :param start_file:      first binlog file to analyse (see SHOW MASTER LOGS)
        :param end_file:        last binlog file to analyse
        :param rollback:        generate rollback SQL: insert -> delete, update swaps SET/WHERE
        :param start_time:      analyse events from this time ("%Y-%m-%d %H:%M:%S")
        :param stop_time:       analyse events up to this time
        :param only_events:     restrict analysis to these binlog event classes
        :param only_schemas:    only analyse these databases
        :param auto_position:   use GTID auto-positioning for log_pos (defaults to the latest position)
        :param ignored_schemas: databases to exclude from analysis
        :param only_dml:        only emit DML statements
        :param only_tables:     only analyse these tables
        :param start_pos:       log_pos to start analysing from
        :param end_pos:         log_pos to stop analysing at
        :param auto_stop:       stop at the binlog position recorded when the run started
        :param blocking:        process the stream in blocking (streaming) mode
        :param output_update_diff: also output before/after values for updates
        """

        # scan window boundaries
        self.start_time = start_time
        self.stop_time = stop_time
        self.start_file = start_file
        self.end_file = end_file
        self.start_pos = start_pos  # without start_time/start_file: current pos
        self.end_pos = end_pos  # without start_time/start_file: None
        self.eof_file = None  # binlog file the master is writing at startup
        self.eof_pos = None  # log_pos of that file at startup
        self.binlog_indexs = None  # binlog files known at startup
        # behaviour switches, used as-is
        self.primary_key = primary_key  # used when rendering statements
        self.rollback = rollback  # used when rendering statements
        self.auto_position = auto_position  # derive log_pos from GTID
        self.only_dml = only_dml  # event filtering (highest priority)
        self.auto_stop = auto_stop  # if True, stop at the position current at startup
        self.blocking = blocking  # streaming mode
        self.output_update_diff = output_update_diff  # print update diffs
        # auto-detected when None
        self.server_id = None  # fetched from the connection
        self.only_events = only_events  # best obtained via stream._allowed_event_list, then intersected with only_dml
        # passed straight through to the stream reader
        self.only_schemas = only_schemas
        self.ignored_schemas = ignored_schemas
        self.only_tables = only_tables
        self.conn = conn
        self.options = REPLICATION_SETTINGS.copy()
        # NOTE: log_pos and auto_position are set unconditionally in
        # __init_binlog_stream_reader_params, so pre-seeding them here
        # (as the old code did) was dead and has been removed.
        self.current_pos = None  # live position while iterating
        self.start_log_pos = None  # position when processing started
        self.event_start_pos = None  # position where the current statement began

        # TEMPLATE: statement templates for insert / delete / update (+rollback)
        self.template_insert = "INSERT INTO `{schema}`.`{table}`({fields}) VALUES ({values});"
        self.template_delete = "DELETE FROM `{schema}`.`{table}` WHERE {where} LIMIT 1;"
        self.template_update = "UPDATE `{schema}`.`{table}` SET {update_after} WHERE {update_before} LIMIT 1;"
        self.template_update_rollback = "UPDATE `{schema}`.`{table}` SET {update_before} WHERE {update_after} LIMIT 1;"

        """init connection"""
        if conn:
            # use the explicit settings when given
            # bug fix: the old message was an f-string whose braces were
            # parsed as a format spec, so a failing assert raised ValueError
            # instead of showing the hint; use a literal string.
            assert isinstance(conn, dict), "Expect: {'host': host, 'port': port, 'user': user, 'passwd': passwd}"
            self.connection = pymysql.connect(**conn)
        else:
            # fall back to the settings from the config file
            self.connection = pymysql.connect(**self.options.get("connection_settings"))

        """normalise/validate parameters (needs the connection)"""
        self.__init_params()

        """build the BinLogStreamReader option dict"""
        self.__init_binlog_stream_reader_params()

        """init stream reader"""
        # see https://python-mysql-replication.readthedocs.io/en/latest/binlogstream.html
        # for the semantics of the options passed here (log_file, log_pos,
        # auto_position, only_events, only_tables, only_schemas,
        # ignored_schemas, blocking, ...).
        self.stream = BinLogStreamReader(**self.options)

    def __init_params(self):
        # 查看是否回滚 和 没有主键同时存在
        if self.rollback and (not self.primary_key):
            raise ValueError("only one of rollback can be True or primary_key is False")

        if self.start_time:
            self.start_time = datetime.strptime(self.start_time, "%Y-%m-%d %H:%M:%S")
        else:
            self.start_time = datetime(1980, 1, 1, 0, 0)
        if self.stop_time:
            self.stop_time = datetime.strptime(self.stop_time, "%Y-%m-%d %H:%M:%S")
        else:
            self.stop_time = datetime.strptime('2999-12-31 00:00:00', "%Y-%m-%d %H:%M:%S")
        assert self.start_time < self.stop_time, f"start_time不能比stop_time大"

        # 获取master info
        try:
            cursor = self.connection.cursor()
            cursor.execute("SHOW MASTER STATUS")
            self.eof_file, self.eof_pos = cursor.fetchone()[:2]
            cursor.execute("SHOW MASTER LOGS")
            self.binlog_indexs = [row[0] for row in cursor.fetchall()]
            cursor.execute("SELECT @@server_id")
            self.server_id = cursor.fetchone()[0]
        except Exception as e:
            raise Exception(f'ERROR: {e}')

        if self.start_file:
            assert self.start_file in self.binlog_indexs, f"{self.start_file} not in show master logs"
        else:
            self.start_file = self.eof_file

        if self.end_file:
            # 目前占时只能是已存在的binlog index: 不存在的时候如果auto_stop,到eof_file的当前pos停止；不存在没有必要设置
            assert self.end_file in self.binlog_indexs

        if self.start_file and self.end_file:
            assert self.start_file <= self.end_file, f"start_file index不能比env_file大"

        if self.start_pos:
            assert isinstance(self.start_pos, int), f"start_pos must be int"

        if self.end_pos:
            assert isinstance(self.end_pos, int), f"end_pos must be int"

        if self.start_pos and self.end_pos:
            assert self.start_pos < self.end_pos

    def __init_binlog_stream_reader_params(self):
        if self.conn:
            self.options["connection_settings"] = self.conn
        self.options["blocking"] = self.blocking
        self.options["only_events"] = self.only_events
        self.options["log_file"] = self.start_file
        self.options["log_pos"] = self.start_pos
        self.options["auto_position"] = self.auto_position
        self.options["only_tables"] = self.only_tables
        self.options["only_schemas"] = self.only_schemas
        self.options['ignored_schemas'] = self.ignored_schemas
        # self.options['ignored_tables']=self.ignored_tables

    @property
    def dml_events(self):
        """The row-level DML event classes: delete, insert (write), update."""
        row_events = (DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent)
        return row_events

    @property
    def current_events(self):
        """Event classes that survive the configured filters.

        When only_dml is off, returns a tuple of every event the stream
        allows; otherwise returns a frozenset of the allowed row (DML)
        events.  Raises AssertionError when nothing is left to process.
        """
        allowed = []
        if self.stream:
            permitted = self.stream._allowed_event_list(self.only_events, self.options["ignored_events"],
                                                        self.options["filter_non_implemented_events"])
            if not self.only_dml:
                return tuple(permitted)
            allowed = [evt for evt in self.dml_events if evt in permitted]
        result = frozenset(allowed)
        assert result, f'没有可执行的events'
        return result

    def __check_events(self, event, only_events):
        """
        Decide whether *event* is processed, skipped, or ends the run.

        Filter order: file -> event type -> pos -> time (file existence was
        already validated during initialisation).  Inputs considered:
        only_events, start_time/stop_time, start_pos/end_pos,
        start_file/end_file and auto_stop.

        :param event: binlog event under inspection
        :param only_events: iterable of event classes allowed through
        :return: 0 -> stop iterating, 1 -> skip this event,
                 any other value (None falls through) -> process it
        """
        # stop once the stream has rotated past the configured end file
        if self.end_file:
            if self.stream.log_file > self.end_file:
                return 0

        # track the current position; Rotate/FormatDescription events are
        # excluded here — presumably their log_pos refers to the new file
        # rather than the scan position (TODO confirm)
        if not isinstance(event, (RotateEvent, FormatDescriptionEvent)):
            self.current_pos = event.packet.log_pos

        # a QueryEvent marks where the enclosing statement began; remember
        # it so __output_sql can annotate the generated SQL with it
        if isinstance(event, QueryEvent):
            self.event_start_pos = event.packet.log_pos
        # skip events whose type is not in the allowed set
        for only_event in only_events:
            # isinstance() is the recommended way to compare event types
            if isinstance(event, only_event):
                break
        else:
            return 1

        # position window
        if self.start_pos and self.current_pos < self.start_pos:
            return 1
        elif self.end_pos and self.current_pos > self.end_pos:
            return 0
        elif self.auto_stop and self.current_pos > self.eof_pos:
            # stop at the master position recorded when the run started
            return 0

        # time window; fromtimestamp can raise OSError for out-of-range
        # stamps on some platforms — fall back to a very early time
        try:
            event_time = datetime.fromtimestamp(event.timestamp)
        except OSError:
            event_time = datetime(1980, 1, 1, 0, 0, 0)

        if self.start_time and event_time < self.start_time:
            return 1
        elif self.stop_time and event_time > self.stop_time:
            return 0

    def start_process_event(self):
        """Main loop: read events from the stream, filter, and dispatch.

        Records the starting log position, then feeds every event through
        __check_events (0 -> stop, 1 -> skip, otherwise process) and hands
        the survivors to handler_event together with the temp output file.

        Bug fix: the stream is now closed in a finally block — previously
        an exception while filtering or handling leaked the connection.
        """
        # snapshot where processing starts
        self.start_log_pos = self.current_pos = self.stream.log_pos
        current_events = self.current_events
        try:
            with temp_open(filename(), "w") as fw_tmp:
                for event in self.stream:
                    # filter: stop / skip / process
                    check_status = self.__check_events(event, current_events)
                    if check_status == 0:
                        break
                    elif check_status == 1:
                        continue

                    # dispatch by event type
                    self.handler_event(event, fw_tmp)
        finally:
            self.stream.close()

    def handler_event(self, event, fw_tmp):
        """Dispatch *event* to the matching private handler.

        Handles: QueryEvent, RotateEvent, StopEvent, FormatDescriptionEvent,
        XidEvent, GtidEvent, BeginLoadQueryEvent, ExecuteLoadQueryEvent,
        UpdateRowsEvent, WriteRowsEvent, DeleteRowsEvent, TableMapEvent,
        HeartbeatLogEvent, NotImplementedEvent; anything else is logged to
        the temp file.

        Bug fix: ExecuteLoadQueryEvent was dispatched to
        __BeginLoadQuery_Event; it now reaches __ExecuteLoadQuery_Event.
        The isinstance check order matches the original code.
        """
        handlers = (
            (QueryEvent, self.__Query_Event),
            (UpdateRowsEvent, self.__UpdateRows_Event),
            (WriteRowsEvent, self.__WriteRows_Event),
            (DeleteRowsEvent, self.__DeleteRows_Event),
            (RotateEvent, self.__Rotate_Event),
            (StopEvent, self.__Stop_Event),
            (FormatDescriptionEvent, self.__FormatDescription_Event),
            (XidEvent, self.__Xid_Event),
            (GtidEvent, self.__Gtid_Event),
            (BeginLoadQueryEvent, self.__BeginLoadQuery_Event),
            (ExecuteLoadQueryEvent, self.__ExecuteLoadQuery_Event),
            (TableMapEvent, self.__TableMap_Event),
            (HeartbeatLogEvent, self.__HeartbeatLog_Event),
            (NotImplementedEvent, self.__NotImplemented_Event),
        )
        for event_cls, handler in handlers:
            if isinstance(event, event_cls):
                handler(event, fw_tmp)
                return
        fw_tmp.write(f"Not implement! {event}\n")

    def __Query_Event(self, event, fw_tmp):
        """Print non-row statements (DDL etc.) carried by a QueryEvent.

        BEGIN/COMMIT markers are ignored; the schema filters are applied
        (ignored list first, then the allow list); the statement is printed
        prefixed by a USE so it runs against the right database.
        """
        if event.query not in ('BEGIN', "COMMIT"):
            schema = event.schema
            if self.ignored_schemas and schema in self.ignored_schemas:
                return
            if self.only_schemas and schema not in self.only_schemas:
                return
            # bug fix: the USE statement previously ran straight into the
            # query text ("USE dbALTER TABLE ..."); terminate it properly.
            sql = f'USE {schema};\n'
            sql += f"{fix_object(event.query)}"
            print(sql)

    def __UpdateRows_Event(self, event, fw_tmp):
        """Render UPDATE statements (or their rollbacks) for each changed row.

        Forward:  SET <after values>  WHERE <before values>
        Rollback: SET <before values> WHERE <after values>
        When output_update_diff is on, a "# col: before -> after" comment is
        emitted for every row that actually changed.
        """
        for row in event.rows:
            schema = event.schema
            table = event.table
            if self.rollback:
                update_before = ', '.join(['`%s`=%%s' % key for key in row["after_values"].keys()])
                # bug fix: the rollback WHERE clause must mirror the row's
                # *after* state; it was previously built from before_values,
                # which disagreed with the after values used to fill it
                # (the forward branch pairs before-items with before-values).
                update_after = ' AND '.join(map(fix_NULL, row['after_values'].items()))
                template = self.template_update_rollback.format(schema=schema, table=table, update_before=update_before,
                                                                update_after=update_after)
                fill_values = map(fix_object, list(row['before_values'].values()) + list(row["after_values"].values()))
            else:
                update_before = ' AND '.join(map(fix_NULL, row['before_values'].items()))
                update_after = ', '.join(['`%s`=%%s' % key for key in row["after_values"].keys()])
                template = self.template_update.format(schema=schema, table=table, update_before=update_before,
                                                       update_after=update_after)
                fill_values = map(fix_object, list(row['after_values'].values()) + list(row["before_values"].values()))
            if self.output_update_diff is True:
                diff_info = []
                for key in row["before_values"].keys():
                    before = row["before_values"].get(key)
                    after = row["after_values"].get(key)
                    if before != after:
                        diff_info.append(f"{key}: {before} -> {after}")
                # robustness fix: skip the comment for no-op rows instead of
                # emitting an empty "# " line
                if diff_info:
                    diff_string = "\n# " + "; ".join(diff_info)
                    fw_tmp.write(diff_string + '\n')
                    print(diff_string)
            self.__output_sql(event, template, list(fill_values), fw_tmp)

    def __WriteRows_Event(self, event, fw_tmp):
        """Render INSERT statements (or DELETE rollbacks) for inserted rows."""
        for row in event.rows:
            row_values = row["values"]
            # drop the primary key column when the caller opted out of it
            if self.primary_key is False and event.primary_key:
                row_values.pop(event.primary_key)
            db_name = event.schema
            tbl_name = event.table
            if self.rollback:
                predicate = ' AND '.join(map(fix_NULL, row_values.items()))
                template = self.template_delete.format(schema=db_name, table=tbl_name, where=predicate)
            else:
                columns = ', '.join(f'`{name}`' for name in row_values)
                placeholders = ', '.join(['%s'] * len(row_values))
                template = self.template_insert.format(schema=db_name, table=tbl_name, fields=columns,
                                                       values=placeholders)
            self.__output_sql(event, template, [fix_object(v) for v in row_values.values()], fw_tmp)

    def __DeleteRows_Event(self, event, fw_tmp):
        """Render DELETE statements (or INSERT rollbacks) for deleted rows."""
        for row in event.rows:
            db_name = event.schema
            tbl_name = event.table
            data = row["values"]
            if self.rollback:
                columns = ', '.join(f'`{name}`' for name in data)
                placeholders = ', '.join(['%s'] * len(data))
                template = self.template_insert.format(schema=db_name, table=tbl_name, fields=columns,
                                                       values=placeholders)
            else:
                predicate = ' AND '.join(map(fix_NULL, data.items()))
                template = self.template_delete.format(schema=db_name, table=tbl_name, where=predicate)
            self.__output_sql(event, template, [fix_object(v) for v in data.values()], fw_tmp)

    # The handlers below are deliberate no-ops: no SQL is reconstructed for
    # these event types, so they are swallowed silently by handler_event.

    def __Rotate_Event(self, event, fw_tmp):
        # binlog file rotation marker
        pass

    def __Stop_Event(self, event, fw_tmp):
        # server stop marker
        pass

    def __FormatDescription_Event(self, event, fw_tmp):
        # binlog format header at the start of each file
        pass

    def __Xid_Event(self, event, fw_tmp):
        # transaction commit marker
        pass

    def __Gtid_Event(self, event, fw_tmp):
        # GTID assignment for the following transaction
        pass

    def __BeginLoadQuery_Event(self, event, fw_tmp):
        # start of a LOAD DATA INFILE transfer
        pass

    def __ExecuteLoadQuery_Event(self, event, fw_tmp):
        # execution of a LOAD DATA INFILE statement
        pass

    def __TableMap_Event(self, event, fw_tmp):
        # table-id -> schema/table mapping preceding row events
        pass

    def __HeartbeatLog_Event(self, event, fw_tmp):
        # master heartbeat, carries no payload
        pass

    def __NotImplemented_Event(self, event, fw_tmp):
        # events the replication library does not implement
        pass

    def __output_sql(self, event, template, values, fw_tmp):
        with self.connection.cursor() as cursor:
            sql = cursor.mogrify(template, values)
            event_time = datetime.fromtimestamp(event.timestamp)
            log_pos = event.packet.log_pos
            sql += f" #start {self.event_start_pos} end {log_pos} time {event_time}"
            if self.rollback:
                fw_tmp.write(sql + '\n')

            print(sql)
