import gc
import io
import logging
import math
import multiprocessing
import os
import re
import sys
import codecs
import json
import secrets
import time
from os import remove
from time import strftime, localtime
#import MySQLdb
#import MySQLdb.cursors
#pymssql: SQL Server connection library (must be installed separately)
import binascii
import pymssql
from geomet import wkb
from geomet import wkt
#from pymysqlreplication.row_event import DeleteRowsEvent, UpdateRowsEvent, WriteRowsEvent
from pg_chameleon import sql_token, ColumnType
from pg_chameleon.lib.parallel_replication import BinlogTrxReader, MyGtidEvent, modified_my_gtid_event
from pg_chameleon.lib.pg_lib import pg_engine
from pg_chameleon.lib.sql_util import SqlTranslator, DBObjectType
from pg_chameleon.lib.task_lib import CopyDataTask, CreateIndexTask, ReadDataTask
from pg_chameleon.lib.task_lib import TableMetadataTask, ColumnMetadataTask, Pair

POINT_PREFIX_LEN = len('POINT ')
POLYGON_PREFIX_LEN = len('POLYGON ')
LINESTR_PREFIX_LEN = len('LINESTRING ')
# 4-byte prefix stripped before parsing WKB geometry values
# (presumably the SRID header - TODO confirm)
WKB_PREFIX_LEN = 4
MARIADB = "mariadb"
LOG_LEVEL_INDEX = 3


BYTE_TO_MB_CONVERSION: int = 1024 * 1024
# we set the default max size of csv file is 2M
DEFAULT_MAX_SIZE_OF_CSV: int = 2 * 1024 * 1024
DEFAULT_SLICE_SIZE = 100000
# NOTE(review): historical typo ("MINIUM") kept - external code may reference it
MINIUM_QUEUE_SIZE: int = 5
# max_execution_time for MySQL(ms)
MAX_EXECUTION_TIME = 0
RANDOM_STR = "RANDOM_STR_SUFFIX"

BINARY_BASE = 2
# 6 significant figures with 'g' presentation; the previous '{:6g}' set the
# field *width* to 6 instead of the precision (the comment always meant
# significant figures).  Both render the same value once passed to float().
DEFAULT_FLOAT_FORMAT_STR = '{:.6g}'
# 18 means the minimum field width, 16 means the precision, e is python format
DEFAULT_DOUBLE_FORMAT_STR = '{:18.16e}'
CSV_META_SUB_DIR = "chameleon"
CSV_DATA_SUB_DIR = "chameleon" + os.sep + "data"
DATA_NUM_FOR_PARALLEL_INDEX = 100000
DATA_NUM_FOR_A_SLICE_CSV = 1000000

# retry count for migration error tables
RETRY_COUNT = 3

# migration progress file
TABLE_PROGRESS_FILE = "tables.progress"
# index task file
INDEX_FILE = "tables.index"
# cursor type indexes (dict / server-side / server-side dict)
DICTCURSOR_INDEX = 0
SSCURSOR_INDEX = 1
SSDICTCURSOR_INDEX = 2
USER_NOT_EXIST_ERROR_CODE = '42704'


class process_state():
    """Constants and helpers describing per-table migration state."""
    # progress percentage at the start of a table migration
    PRECISION_START = 0
    # row count meaning the table is empty
    COUNT_EMPTY = 0
    # progress percentage of a successfully finished table
    PRECISION_SUCCESS = 1
    PENDING_STATUS = 1
    PROCESSING_STATUS = 2
    ACCOMPLISH_STATUS = 3
    FAIL_STATUS = 6

    @classmethod
    def is_precision_success(cls, status):
        """Return True when status equals PRECISION_SUCCESS."""
        return status == cls.PRECISION_SUCCESS

    @classmethod
    def is_fail_status(cls, status):
        """Return True when status equals FAIL_STATUS."""
        return status == cls.FAIL_STATUS

#################################
# This class initialises the three cursors used by the read_data_process
# method: a buffered cursor (cursor_buffered), an unbuffered cursor
# (cursor_unbuffered) and an unbuffered dict cursor (cursor_dict_unbuffered).
# The cursors are created from connections obtained through the get_connect
# method of the source object passed in, which must therefore implement
# get_connect.  pymssql knows cursor_type values 0, 1 and 2; each of the
# three cursors should be mapped to the matching pymssql flavour.
##################################
class reader_cursor_manager(object):
    """
    Holds the three cursors used by read_data_process: a buffered dict
    cursor, an unbuffered tuple cursor and an unbuffered dict cursor.
    Each cursor is created from a separate connection returned by
    sqlserver_source_obj.get_connect().
    NOTE(review): get_connect() currently ignores cursor_type, so the
    "unbuffered" cursors may not really be unbuffered - confirm.
    """

    def __init__(self, sqlserver_source_obj):
        # buffered cursor returning rows as dictionaries
        self.cursor_buffered = sqlserver_source_obj.get_connect().cursor(as_dict=True)
        # unbuffered cursor returning rows as tuples
        self.cursor_unbuffered = sqlserver_source_obj.get_connect(cursor_type=1).cursor()
        # unbuffered cursor returning rows as dictionaries
        self.cursor_dict_unbuffered = sqlserver_source_obj.get_connect(cursor_type=1).cursor(as_dict=True)

    def __del__(self):
        # __init__ may have failed before all cursors existed, so close()
        # must never raise from the destructor.
        self.close()

    def close(self):
        """Close every cursor that was successfully created, best-effort."""
        for attr in ("cursor_buffered", "cursor_unbuffered", "cursor_dict_unbuffered"):
            cursor = getattr(self, attr, None)
            if cursor is not None:
                try:
                    cursor.close()
                except Exception:
                    # closing an already-closed cursor must not abort cleanup
                    pass

class sqlserver_source(object):
    def __init__(self):
        """
            Constructor for SQL Server source class.
            Initialises the skipped statements, schema mappings, column-type
            decode/convert maps, flags and the queues used during migration.
        """
        # statements ignored during replication
        self.statement_skip = ['BEGIN', 'COMMIT']
        self.schema_tables = {}
        self.enable_compress_tables = {}
        self.schema_mappings = {}
        self.schema_loading = {}
        self.schema_list = []
        # MySQL column types are reused for now: types always rendered as
        # hex, PostGIS spatial types and common spatial types.
        self.hexify_always = ColumnType.get_mysql_hexify_always_type()
        self.postgis_spatial_datatypes = ColumnType.get_mysql_postgis_spatial_type()
        self.common_spatial_datatypes = ColumnType.get_mysql_common_spatial_type()
        # misc flags: schema-only mode, GTID settings, decode/convert maps
        self.schema_only = {}
        self.gtid_mode = False
        self.gtid_enable = False
        self.decode_map = {}
        self.convert_map = {}
        self.write_task_queue = None
        self.read_task_queue = None
        self.is_mariadb = False
        self.version = 0
        # index_waiting_queue temporarily stores index tasks; once all data
        # read tasks are complete they are moved to write_task_queue.
        self.index_waiting_queue = None
        self.table_metadata_queue = None
        self.column_metadata_queue = None
        # populate decode_map (column type name -> decode callable)
        self.__init_decode_map()
        self.enable_compress = False
        self.sql_translator = SqlTranslator()
        self.only_migration_index = False
        self.migration_progress_dict = None
        self.write_progress_file_lock = None
        self.need_migration_tables_number = 0
        # NOTE(review): attribute keeps the historical typo ("complted")
        # because other methods reference it by this name.
        self.complted_tables_number_before = 0


    @classmethod
    def initJson(cls):
        """Create the shared, process-safe progress structures.

        The structures are published as module-level globals so that worker
        processes spawned later can update a common view of the migration
        progress (overall status, total record/data counters, start time).
        """
        manager = multiprocessing.Manager()
        global managerJson, totalRecord, totalData, INITIAL_TIME
        managerJson = manager.dict({"total": {}})
        totalRecord = manager.dict({"totalRecord": 0})
        totalData = manager.dict({"totalData": 0})
        INITIAL_TIME = manager.dict({"initialTime": int(time.time())})

    @classmethod
    def getmanagerJson(cls):
        """Return the shared progress dict created by initJson().

        Raises NameError if initJson() has not been called yet.
        """
        return managerJson

    @classmethod
    def __decode_hexify_value(cls, origin_value, numeric_scale):
        return binascii.hexlify(origin_value).decode().upper()

    @classmethod
    def __decode_json_value(cls, origin_value, numeric_scale):
        """Decode a JSON column value: normalise dict keys, then swap the
        single quotes of the repr for double quotes (scale unused)."""
        decoded = cls.__decode_dic_keys(origin_value)
        return str(decoded).replace("'", '"')

    @classmethod
    def __decode_postgis_spatial_value(cls, origin_value, numeric_scale):
        """Convert a spatial value to its upper-case text form (scale unused)."""
        spatial_text = cls.__get_text_spatial(origin_value)
        return spatial_text.upper()

    @classmethod
    def __decode_binary_value(cls, origin_value, numeric_scale):
        if not isinstance(origin_value, bytes):
            return origin_value
        return origin_value.decode()

    @classmethod
    def __decode_point_value(cls, origin_value, numeric_scale):
        """Translate a prefixed WKB point into 'x,y' text (scale unused)."""
        wkt_text = wkt.dumps(wkb.loads(origin_value[WKB_PREFIX_LEN:]))
        return wkt_text[POINT_PREFIX_LEN:].replace(' ', ',')

    @classmethod
    def __decode_polygon_value(cls, origin_value, numeric_scale):
        """Translate a prefixed WKB polygon into '(x,y),(x,y),...' text."""
        wkt_text = wkt.dumps(wkb.loads(origin_value[WKB_PREFIX_LEN:]))
        body = wkt_text[POLYGON_PREFIX_LEN:]
        return body.replace(', ', '),(').replace(' ', ',')

    @classmethod
    def __decode_linestr_value(cls, origin_value, numeric_scale):
        """Translate a prefixed WKB linestring into '[(x,y),(x,y),...]' text."""
        wkt_text = wkt.dumps(wkb.loads(origin_value[WKB_PREFIX_LEN:]))
        body = wkt_text[LINESTR_PREFIX_LEN:].replace(', ', '),(').replace(' ', ',')
        return '[' + body + ']'

    @classmethod
    def __decode_float_value(cls, float_type):
        def __real_decode(origin_value, numeric_scale):
            if not numeric_scale or numeric_scale == 'NULL':
                format_str = DEFAULT_FLOAT_FORMAT_STR if (
                            float_type == ColumnType.M_FLOAT.value) else DEFAULT_DOUBLE_FORMAT_STR
            else:
                format_str = '{:.' + str(numeric_scale) +'f}'
            return float(format_str.format(origin_value))
        return __real_decode

    @classmethod
    def __decode_bit_value(cls, origin_value, numeric_scale):
        # BIT values pass through unchanged on the decode side;
        # __convert_bit_value performs the textual 0/1 expansion.
        return origin_value

    @classmethod
    def __decode_set_value(cls, origin_value, numeric_scale):
        value = str(origin_value)
        binlog_list = value[1:len(value) - 1].replace("'", "").split(", ")
        type_list = numeric_scale[4:len(numeric_scale) - 1].replace("'", "").split(",")
        result_list = []
        for element in type_list:
            if element in binlog_list:
                result_list.append(element)
        return ",".join(result_list)

    @classmethod
    def __decode_default_value(cls, origin_value, numeric_scale):
        # fallback decoder: types without a dedicated handler pass through
        return origin_value

    def __init_decode_map(self):
        """Populate self.decode_map, mapping a column type name to the
        callable that renders a value of that type."""
        decode_map = {
            ColumnType.M_C_GIS_POINT.value: self.__decode_point_value,
            ColumnType.M_C_GIS_GEO.value: self.__decode_point_value,
            ColumnType.M_C_GIS_LINESTR.value: self.__decode_linestr_value,
            ColumnType.M_C_GIS_POLYGON.value: self.__decode_polygon_value,
            ColumnType.M_JSON.value: self.__decode_json_value,
            ColumnType.M_BINARY.value: self.__decode_binary_value,
            ColumnType.M_VARBINARY.value: self.__decode_binary_value,
            ColumnType.M_FLOAT.value: self.__decode_float_value(ColumnType.M_FLOAT.value),
            ColumnType.M_DOUBLE.value: self.__decode_float_value(ColumnType.M_DOUBLE.value),
            ColumnType.M_SET.value: self.__decode_set_value,
            ColumnType.M_BIT.value: self.__decode_bit_value,
        }
        # hexified and PostGIS spatial types each share a single handler
        decode_map.update((t, self.__decode_hexify_value) for t in self.hexify_always)
        decode_map.update((t, self.__decode_postgis_spatial_value) for t in self.postgis_spatial_datatypes)
        self.decode_map = decode_map


    @classmethod
    def __convert_binary_value(cls, origin_value, numeric_precision):
        return '\\x' + binascii.hexlify(origin_value).decode().upper()

    @classmethod
    def __lzeropad(cls, bits, numeric_precision):
        if len(bits) < numeric_precision:
            return '0'*(numeric_precision - len(bits)) + bits
        else:
            return bits[len(bits) - numeric_precision :]

    @classmethod
    def __convert_bit_value(cls, origin_value, numeric_precision):
        return cls.__lzeropad(''.join(format(byte, '08b') for byte in origin_value), numeric_precision)

    @classmethod
    def __convert_default_value(cls, origin_value, numeric_precision):
        # fallback converter: types without a dedicated handler pass through
        return origin_value
     # Related to column conversion; to be reworked later.
#    def __init_convert_map(self):
#        self.convert_map = {
#            ColumnType.M_BINARY.value: self.__convert_binary_value,
#            ColumnType.M_BIT.value: self.__convert_bit_value,
#            ColumnType.M_C_GIS_POINT.value: self.__decode_point_value,
#            ColumnType.M_C_GIS_GEO.value: self.__decode_point_value,
#            ColumnType.M_C_GIS_POLYGON.value: self.__decode_polygon_value,
#            ColumnType.M_C_GIS_LINESTR.value: self.__decode_linestr_value,
#        }
#        for v in self.hexify:
#            self.convert_map[v] = self.__decode_hexify_value
#        for v in self.postgis_spatial_datatypes:
#            self.convert_map[v] = self.__decode_postgis_spatial_value
#        self.convert_type_set = {ColumnType.M_BINARY.value, ColumnType.M_BIT.value, ColumnType.M_C_GIS_POINT.value,
#        ColumnType.M_C_GIS_GEO.value, ColumnType.M_C_GIS_POLYGON.value, ColumnType.M_C_GIS_LINESTR.value}
#        self.convert_type_set.update(self.hexify, self.postgis_spatial_datatypes)
#
##
#    def __del__(self):
#        """
#            Class destructor, tries to disconnect the mysql connection.
#        """
#        self.disconnect_db_unbuffered()
#        self.disconnect_db_buffered()
############################################################################################################################        
    
    def connect_db_buffered(self):
        """
        Create a new buffered connection to the SQL Server database using
        pymssql, with a cursor that returns rows as dictionaries.
        Connection parameters are read from self.source_config["db_conn"].
        """
        db_conn = self.source_config["db_conn"]
        # coerce every setting to str, then fix up the numeric ones
        db_conn = {key: str(value) for key, value in db_conn.items()}
        db_conn["port"] = int(db_conn["port"])
        db_conn["timeout"] = int(db_conn["timeout"])

        self.conn_buffered = pymssql.connect(
            server=db_conn["host"],
            database=db_conn["database"],
            user=db_conn["user"],
            password=db_conn["password"],
            port=db_conn["port"],
            login_timeout=db_conn["timeout"]  # pymssql uses login_timeout instead of timeout
        )
        # cursor returning rows as dictionaries
        self.cursor_buffered = self.conn_buffered.cursor(as_dict=True)
        self.charset = db_conn["charset"]
        # use the module logger for consistency with the rest of this class
        # (previously a raw print)
        self.logger.info("Connected to SQL Server (buffered connection).")




    def disconnect_db_buffered(self):
        """
            Close the buffered (dict-cursor) connection, ignoring errors if
            the connection was never opened or is already closed.
        """
        try:
            self.conn_buffered.close()
        except Exception:
            # best-effort: missing attribute or already-closed connection;
            # Exception (not bare except) so SystemExit/KeyboardInterrupt pass
            pass

    def get_connect(self, cursor_type=0):
        """Open and return a new pymssql connection from source_config.

        NOTE(review): cursor_type is accepted for parity with the MySQL
        reader (0/1/2 cursor flavours, see reader_cursor_manager) but is
        currently ignored - every call returns the same kind of connection.
        """
        db_conn = self.source_config["db_conn"]
        # coerce every setting to str, then fix up the numeric ones
        db_conn = {key:str(value) for key, value in db_conn.items()}
        db_conn["port"] = int(db_conn["port"])
        db_conn["timeout"] = int(db_conn["timeout"])
        conn = pymssql.connect(
            server=db_conn["host"],
            user=db_conn["user"],
            password=db_conn["password"],
            database=db_conn["database"],  # database name is required
            port=db_conn["port"],
            timeout=db_conn["timeout"]
            )
        # return the new connection
        return conn
    
####################################
# check_lower_case_table_names: verifies that the table-name case
# sensitivity settings of SQL Server and openGauss are consistent.
# Adapted from init_replica (step 4) in mysql_lib.py.
####################################
    def check_lower_case_table_names(self):
        """
            Check that the table-name case sensitivity settings of SQL Server
            (database collation) and openGauss (dolphin.lower_case_table_names)
            are consistent.  If they are not, the migration exits.
        """
        print("===============check_lower_case_table_names======================")
        # read the collation of the current SQL Server database
        lower_case_sqlserver_sql = """
            SELECT collation_name AS CollationName
            FROM sys.databases
            WHERE name = DB_NAME();
            """
        # e.g. SQL_Latin1_General_CP1_CI_AS: CI = case insensitive, CS = case sensitive
        self.cursor_buffered.execute(lower_case_sqlserver_sql)
        collation_results = self.cursor_buffered.fetchall()
        collation_sql_server = collation_results[0]["CollationName"] if collation_results else None
        lower_case_opengauss_sql = """show dolphin.lower_case_table_names;"""
        stmt = self.pg_engine.pgsql_conn.prepare(lower_case_opengauss_sql)
        lower_case_opengauss = int(stmt.first())

        # 1 when the SQL Server collation is case insensitive ('CI'), else 0.
        # Guard against a missing collation row: previously a None collation
        # raised TypeError on the 'in' test.
        if collation_sql_server and 'CI' in collation_sql_server:
            case_sensitive_sql_server = 1
        else:
            case_sensitive_sql_server = 0

        print("SQL Server collation: %s" % collation_sql_server)
        print("SQL Server case sensitive: %s" % case_sensitive_sql_server)
        print("openGauss lower_case_table_names: %s" % lower_case_opengauss)

        # openGauss values above 1 behave like 1 for this comparison
        if lower_case_opengauss > 1:
            lower_case_opengauss = 1

        if case_sensitive_sql_server != lower_case_opengauss:
            self.logger.error("The collation setting in SQL Server is %s (0: case sensitive, 1: case insensitive) "
                          "and the parameter dolphin.lower_case_table_names in openGauss is %s. "
                          "They are inconsistent, the migration will exit."
                          % (case_sensitive_sql_server, lower_case_opengauss))
            os._exit(0)

        print("===============check_lower_case_table_names end======================")

    

    
####################################
# check_sqlserver_config: checks whether the SQL Server configuration is
# compatible with the replication requirements (the required SQL Server
# replication parameters still need to be researched).
# Called from init_replica (step 3).
####################################
    def check_sqlserver_config(self, is_strict=False):
        """
            Check whether the SQL Server configuration is compatible with
            replication requirements.

            The exact parameters SQL Server requires for replication are not
            known yet.  For MySQL the checked parameters were:
                log_bin          - binary logging enabled (must be ON)
                binlog_format    - binary log format (must be ROW)
                binlog_row_image - binary log row image (must be FULL)
            TODO: research the SQL Server equivalents.
        """
        # The MySQL parameter checks were removed as they do not apply to
        # SQL Server; the replacement checks still need investigation.
        self.__check_sqlserver_param(is_strict)

####################################
# __check_sqlserver_param: checks SQL Server configuration compatibility.
# Called from init_replica (step 3) via check_sqlserver_config.
# Work in progress.
####################################
    def __check_sqlserver_param(self, is_strict):
        # Placeholder: the MySQL version checked the server version,
        # gtid_mode and the binlog flags here.  SQL Server has no GTID
        # concept, so the equivalent checks still need to be researched.
        pass

####################################
# __build_table_exceptions: builds two dictionaries from the source
# settings limit_tables and skip_tables.
# Called from init_replica (step 6); based on the mysql_lib source.
####################################
    def __build_table_exceptions(self):
        """
        该方法根据源设置的 limit_tables 和 skip tables 值构建两个字典。
        这些字典旨在用于 get_table_list，以清理每个模式下的表列表。
        该方法处理类变量 self.tables 被设置的特殊情况。
        在这种情况下，只有 self.tables 中指定的表会被同步。如果 limit_tables 已经设置，
        则结果列表是 self.tables 和 limit_tables 的交集。
        """
        print("===============__build_table_exceptions======================")
        self.limit_tables = {}
        self.skip_tables = {}
        limit_tables = self.source_config["limit_tables"]
        skip_tables = self.source_config["skip_tables"]

        if self.tables !='*':
            tables = [table.strip() for table in self.tables.split(',')]
            if limit_tables:
                limit_schemas = [table.split('.')[0] for table in limit_tables]
                limit_tables = [table for table in tables if table in limit_tables or table.split('.')[0] not in limit_schemas]
            else:
                limit_tables = tables
            self.schema_only = {table.split('.')[0] for table in limit_tables}


        if limit_tables:
            table_limit = [table.split('.') for table in limit_tables]
            for table_list in table_limit:
                list_exclude = []
                try:
                    list_exclude = self.limit_tables[table_list[0]]
                    list_exclude.append(table_list[1])
                except KeyError:
                    try:
                        list_exclude.append(table_list[1])
                    except IndexError:
                        pass

                self.limit_tables[table_list[0]]  = list_exclude
        if skip_tables:
            table_skip = [table.split('.') for table in skip_tables]
            for table_list in table_skip:
                list_exclude = []
                try:
                    list_exclude = self.skip_tables[table_list[0]]
                    list_exclude.append(table_list[1])
                except KeyError:
                    try:
                        list_exclude.append(table_list[1])
                    except:
                        pass
                self.skip_tables[table_list[0]]  = list_exclude

        self.__get_compress_table()

    def __get_compress_table(self):
        if self.enable_compress:
            self.compress_tables = {}
            compress_tables = self.source_config["compress_tables"]
            if compress_tables:
                table_compress = [table.split('.') for table in compress_tables]
                self.__parse_compress_table_list(table_compress)

    def __parse_compress_table_list(self, table_compress):
        for table_list in table_compress:
            list_exclude = []
            try:
                list_exclude = self.compress_tables[table_list[0]]
                list_exclude.append(table_list[1])
            except KeyError:
                try:
                    list_exclude.append(table_list[1])
                except IndexError:
                    pass
            self.compress_tables[table_list[0]] = list_exclude

####################################
# get_table_list
# Called from init_replica (step 7).
####################################
    def get_table_list(self):
        """
        Extract the table list for every schema in self.schema_list and store
        it in self.schema_tables, keyed by schema name.  Row counts are also
        collected and, when dump_json is set, used to seed the shared
        progress dictionary.
        """
        print("===============get_table_list======================")
        # SQL Server query returning the name and row count of every user table
        sql_tables="""
        SELECT 
            t.name AS table_name,
            SUM(p.rows) AS table_rows
        FROM 
            sys.tables AS t
        INNER JOIN 
            sys.partitions AS p ON t.object_id = p.object_id
        WHERE 
            t.is_ms_shipped = 0 
            AND t.type = 'U'     
            AND p.index_id IN (0, 1) 
        GROUP BY 
            t.name;         
        """
        for schema in self.schema_list:
            # NOTE(review): schema comes from configuration and is interpolated
            # unescaped into the USE statement - confirm the names are trusted.
            self.cursor_buffered.execute(f"use {schema};")
            self.cursor_buffered.execute(sql_tables)
            table_list = []
            table_rows = []
            for table in self.cursor_buffered.fetchall():
                table_list.append(table["table_name"])
                table_rows.append(table["table_rows"])
            # print the discovered tables with their row counts
            print(f"Schema: {schema}")
            print("Tables and Rows:")
            for table_name, table_row in zip(table_list, table_rows):
                print(f"Table: {table_name}, Rows: {table_row}")

            # apply the limit_tables filter for this schema, when present
            try:
                limit_tables = self.limit_tables[schema]
                if len(limit_tables) > 0:
                    table_list = [table for table in table_list if table in limit_tables]
            except KeyError:
                pass
            # apply the skip_tables filter for this schema, when present
            try:
                skip_tables = self.skip_tables[schema]
                if len(skip_tables) > 0:
                    table_list = [table for table in table_list if table not in skip_tables]
            except KeyError:
                pass

            self.get_compress_tables(schema, table_list)  # helper 1: record compress tables

            self.schema_tables[schema] = self.filter_table_list_from_csv_file(schema, table_list)  # helper 2

            # if self.is_skip_completed_tables:  # parameter no longer needed? it meant: skip already-completed tables
            #     completed_schema_tables = self.get_completed_tables()  # helper 3
            #     # helper 4
            #     self.schema_tables[schema] = self.filter_table_list_from_progress(schema, table_list, completed_schema_tables) 
            #     self.need_migration_tables_number += len(self.schema_tables[schema])

            # seed the shared progress dict: empty tables start as accomplished
            if self.dump_json:
                for index, key in enumerate(table_list):
                    managerJson.update({key: {
                        "name": key,
                        "status": process_state.ACCOMPLISH_STATUS
                        if table_rows[index] == process_state.COUNT_EMPTY else process_state.PENDING_STATUS,
                        "percent": process_state.PRECISION_START,
                        "error": ""
                    }})

####################################
# get_compress_tables
# Called from init_replica (step 7) via get_table_list (helpers 1-4).
####################################
    def get_compress_tables(self, schema, table_list):
        """Record, for schema, the subset of table_list that should be
        compressed; with no per-schema config the whole list is used."""
        if not self.enable_compress:
            return
        configured = self.compress_tables.get(schema)
        if configured:
            self.enable_compress_tables[schema] = [table for table in table_list if table in configured]
        else:
            self.enable_compress_tables[schema] = table_list

    def filter_table_list_from_csv_file(self, schema, table_list):
        """
            The method filters table list from csv files.

            :param schema: the schema name
            :param table_list: the table list
            :return: the table list filtered by csv files
            :rtype: list
        """
        if len(self.csv_dir) == 0:
            self.logger.warning("csv dir is empty, so copy data according to select query for all mysql schemas.")
        else:
            csv_table_list = []
            for table in table_list:
                table_csv_file = self.csv_dir + os.path.sep + schema + "_" + table + ".csv"
                if os.path.exists(table_csv_file):
                    csv_table_list.append(table)
            if len(csv_table_list) == 0:
                self.logger.warning("csv dir exists, but no valid csv file, so copy data according to select query "
                                    "for %s schema." % schema)
            else:
                self.logger.info("csv dir is valid, so copy data according to existed csv files for %s schema."
                                 % schema)
                table_list = [table for table in table_list if table in csv_table_list]
        return table_list

    def get_completed_tables(self):
        """
            The method get migration completed tables of each schema.

            :return: the dict recording completed tables of each schema
            :rtype: dictionary
        """
        completed_schema_tables = {}
        with open(self.progress_file, 'r') as fr:
            while True:
                completed_line = fr.readline()[:-1]
                if completed_line:
                    completed_schema = completed_line.split('`.`')[0][1:]
                    completed_table = completed_line.split('`.`')[1][:-1]
                    if completed_schema in completed_schema_tables:
                        completed_schema_tables[completed_schema].add(completed_table)
                    else:
                        completed_schema_tables[completed_schema] = {completed_table}
                else:
                    break
        return completed_schema_tables

    def filter_table_list_from_progress(self, schema, table_list, completed_schema_tables):
        """
            Drop tables already recorded as completed for this schema and
            account for them in complted_tables_number_before.

            :param schema: the schema name
            :param table_list: the candidate table names
            :param completed_schema_tables: dict of completed tables per schema
            :return: the table list with completed tables removed
            :rtype: list
        """
        if schema not in completed_schema_tables:
            return table_list
        done = completed_schema_tables[schema]
        self.complted_tables_number_before += len(done)
        return [table for table in table_list if table not in done]
    
####################################
# create_destination_schemas
# Called from init_replica (step 9).
####################################
    def create_destination_schemas(self):
        """
            Builds, for every schema to replicate, the pair of schemas used by
            the copy: the destination schema (taken from the schema mappings)
            and a loading schema named "_<destination>_tmp". Because
            PostgreSQL caps identifiers at 64 characters, the destination name
            is truncated to 59 characters so the prefix and suffix still fit.
            The pairs are stored in the class dictionary schema_loading; any
            loading schema left over from a previous run is dropped, then both
            schemas are (re)created.
        """
        print("===============create_destination_schemas======================")
        for schema in self.schema_list:
            dest_schema = self.schema_mappings[schema]
            # 59 chars + "_" prefix + "_tmp" suffix fits the 64-char identifier limit
            tmp_schema = "_%s_tmp" % dest_schema[0:59]
            self.schema_loading[schema] = {'destination': dest_schema, 'loading': tmp_schema}
            print("Destination schema: %s, Loading schema: %s" % (dest_schema, tmp_schema))
            # drop a tmp schema possibly left behind by a previous migration
            self.logger.debug("Dropping the existing tmp schema %s." % tmp_schema)
            self.pg_engine.drop_database_schema(tmp_schema, True)
            self.logger.debug("Creating the loading schema %s." % tmp_schema)
            self.pg_engine.create_database_schema(tmp_schema)
            self.logger.debug("Creating the destination schema %s." % dest_schema)
            self.pg_engine.create_database_schema(dest_schema)


    def drop_destination_tables(self):
        """
            Drops, before the migration starts, every destination table that
            did not complete a previous run, so the copy can start clean.
        """
        self.logger.info("Start to drop failed tables before.")
        for schema, tables in self.schema_tables.items():
            for tab in tables:
                self.pg_engine.drop_failed_table(schema, tab)
        self.logger.info("Finish dropping failed tables before.")

    def drop_loading_schemas(self):
        """
            Drops every loading schema generated by create_destination_schemas
            from the destination database. Assumes the class dictionary
            schema_loading is correctly set.
        """
        for mapping in self.schema_loading.values():
            tmp_schema = mapping["loading"]
            self.logger.debug("Dropping the schema %s." % tmp_schema)
            self.pg_engine.drop_database_schema(tmp_schema, True)

    # def get_partition_metadata(self, table, schema):
    #     """
    #         The method builds the table's partition metadata querying the information_schema.
    #         The data is returned as a dictionary.

    #         :param table: The table name
    #         :param schema: The table's schema
    #         :return: table's partition metadata as a cursor dictionary
    #         :rtype: dictionary
    #     """
    #     #sys.partitions 中没有子分区的相关信息，因此 
    #     # subpartition_ordinal_position, subpartition_name, 
    #     # subpartition_method, subpartition_expression 都设置为 NULL。
    #     sql_metadata="""
    #         SELECT DISTINCT
    #             partition_ordinal_position as partition_ordinal_position,
    #             subpartition_ordinal_position as subpartition_ordinal_position,
    #             subpartition_name as subpartition_name,
    #             subpartition_method as subpartition_method,
    #             subpartition_expression as subpartition_expression,
    #             partition_name as partition_name,
    #             partition_method as partition_method,
    #             partition_expression as partition_expression,
    #             partition_description as partition_description,
    #             tablespace_name as tablespace_name
    #         FROM
    #             information_schema.partitions
    #         WHERE
    #                 table_schema=%s
    #             AND table_name=%s
    #         ORDER BY
    #             partition_ordinal_position
    #         ;
    #     """
    #     self.cursor_buffered.execute(sql_metadata, (schema, table))
    #     partition_metadata=self.cursor_buffered.fetchall()
    #     return partition_metadata

    def get_table_metadata(self, table, schema):
        """
            The method builds the table's column metadata querying
            information_schema.COLUMNS, ordered by ordinal position.

            :param table: The table name
            :param schema: The table's schema
            :return: the per-column metadata as cursor rows
            :rtype: dictionary cursor rows
        """
        print("===============get_table_metadata======================")
        # leftover column aliases from a previous query version:
        #         COLUMN_NAME,
        # COLUMN_DEFAULT,
        # TYPE_FORMAT,
        # COL_SERIAL
        sql_metadata = """
            SELECT
                column_name as column_name,
                column_default as column_default,
                ordinal_position as ordinal_position,
                data_type as data_type,
                character_maximum_length as character_maximum_length,
                is_nullable as is_nullable,
                numeric_precision as numeric_precision,
                numeric_scale as numeric_scale,
                character_set_name as character_set_name,
                collation_name as collation_name
            FROM
                information_schema.COLUMNS
            WHERE
                table_schema=%s
                AND table_name=%s
            ORDER BY
                ordinal_position
            ;
        """
        #where schema_name=%s and table_name=%s
        # debug: print the query parameters before executing
        print(f"Schema: {schema}, Table: {table}")
        self.cursor_buffered.execute(sql_metadata, (schema, table))
        table_metadata = self.cursor_buffered.fetchall()
        # debug: print the fetched result
        print("=========Fetched table metadata:========")
        print(table_metadata)
        return table_metadata

    # def get_table_info(self, table, schema):
    #     """
    #         The method gets the comment of the table.

    #         :param table: the table name
    #         :param schema: the table schema
    #         :return: the table's comment
    #     """
    #     sql = """
    #         SELECT 
    #             t.name AS TableName,
    #             s.name AS SchemaName,
    #             p.rows AS RowCounts,
    #             t.create_date AS CreationDate,
    #             t.modify_date AS LastModifiedDate
    #         FROM 
    #             sys.tables t
    #         INNER JOIN 
    #             sys.schemas s ON t.schema_id = s.schema_id
    #         INNER JOIN 
    #             sys.partitions p ON t.object_id = p.object_id
    #         WHERE 
    #             p.index_id IN (0, 1)  -- 0 = HEAP, 1 = CLUSTERED
    #             AND s.name = %s
    #             AND t.name = %s;
    #     """
    #     self.cursor_buffered.execute(sql, (schema, table))
    #     table_info = self.cursor_buffered.fetchone()
    #     return table_info

    def get_foreign_keys_metadata(self):
        """
            The method collects the foreign key metadata for the detach replica process.

            For every foreign key in the configured schemas it returns the
            constraint name, the referencing/referenced table and schema, the
            aggregated column lists, and the ON UPDATE / ON DELETE rules mapped
            from SQL Server's referential_action codes.

            :return: the foreign key metadata rows
            :rtype: list
        """
        self.__init_sync()
        # build a quoted, comma-separated schema list for the IN clauses below.
        # NOTE(review): the schema names are interpolated into the SQL string;
        # they come from the local configuration file, not from untrusted input
        schema_replica = "'%s'"  % "','".join([schema.strip() for schema in self.sources[self.source]["schema_mappings"]])
        self.logger.info("retrieving foreign keys metadata for schemas %s" % schema_replica)
        sql_fkeys = """
            SELECT 
                fk.name AS constraint_name,
                tp.name AS table_name,
                s.name AS table_schema,
                tr.name AS referenced_table_name,
                rs.name AS referenced_table_schema,
                STRING_AGG(c.name, ', ') AS fk_cols,
                STRING_AGG(rc.name, ', ') AS ref_columns,
                CASE fk.update_referential_action
                    WHEN 1 THEN 'CASCADE'
                    WHEN 2 THEN 'SET NULL'
                    WHEN 3 THEN 'SET DEFAULT'
                    ELSE 'NO ACTION'
                END AS update_rule,
                CASE fk.delete_referential_action
                    WHEN 1 THEN 'CASCADE'
                    WHEN 2 THEN 'SET NULL'
                    WHEN 3 THEN 'SET DEFAULT'
                    ELSE 'NO ACTION'
                END AS delete_rule
            FROM 
                sys.foreign_keys AS fk
            INNER JOIN 
                sys.foreign_key_columns AS fkc ON fk.object_id = fkc.constraint_object_id
            INNER JOIN 
                sys.tables AS tp ON fkc.parent_object_id = tp.object_id
            INNER JOIN 
                sys.schemas AS s ON tp.schema_id = s.schema_id
            INNER JOIN 
                sys.tables AS tr ON fkc.referenced_object_id = tr.object_id
            INNER JOIN 
                sys.schemas AS rs ON tr.schema_id = rs.schema_id
            INNER JOIN 
                sys.columns AS c ON fkc.parent_object_id = c.object_id AND fkc.parent_column_id = c.column_id
            INNER JOIN 
                sys.columns AS rc ON fkc.referenced_object_id = rc.object_id AND fkc.referenced_column_id = rc.column_id
            WHERE 
                s.name IN (%s)  -- 替换为实际的架构名称
                AND rs.name IN (%s)  -- 替换为实际的引用架构名称
            GROUP BY 
                fk.name, tp.name, s.name, tr.name, rs.name, fk.update_referential_action, fk.delete_referential_action
            ORDER BY 
                tp.name;
        """ % (schema_replica, schema_replica)
        self.cursor_buffered.execute(sql_fkeys)
        fkey_list=self.cursor_buffered.fetchall()
        self.disconnect_db_buffered()
        return fkey_list
    
    def create_destination_tables(self):
        """
            Creates every destination table in its loading schema, looping over
            the table names stored in the class dictionary schema_tables.
            Tables listed in enable_compress_tables for their schema are
            created with compression enabled.
        """
        print("===============create_destination_tables======================")
        self.logger.info("Start to create tables.")
        self.pg_engine.check_migration_collate()
        self.migration_collate = False
        print("*****(1)self.migration_collate is *****")
        # avoid relying on table_collate: SQL Server has no direct equivalent
        print(self.migration_collate)
        for schema, table_list in self.schema_tables.items():
            enable_compress = schema in self.enable_compress_tables
            compress_tables = self.enable_compress_tables[schema] if enable_compress else ()
            for table in table_list:
                table_metadata = self.get_table_metadata(table, schema)
                print("=====Table metadata is======")
                print(table_metadata)
                use_compress = enable_compress and table in compress_tables
                self.pg_engine.create_table(table_metadata, None, None, table, schema, 'sqlserver', use_compress)
        self.logger.info("Finish creating all the tables")

####################################
# get_master_coordinates (orig. line 1329 in the MySQL module)
# called by init_replica (step 5)
####################################
    def get_master_coordinates(self, cursor_buffered=None):
        """
            Returns the source server "coordinates": the most recent commit
            LSN read from the SQL Server transaction log. SQL Server has no
            direct counterpart of the MySQL binlog file/position pair
            (SHOW MASTER STATUS), so the newest LOP_COMMIT_XACT entry of
            sys.fn_dblog is used instead.

            :param cursor_buffered: optional cursor to run the query on; the
                class-level buffered cursor is used when omitted
            :return: the result set holding the latest commit LSN
        """
        print("===============get_master_coordinates======================")
        sql_master = """
            SELECT TOP 1 
              [Current LSN]
            FROM 
              sys.fn_dblog(NULL, NULL)
            WHERE 
              [Operation] = 'LOP_COMMIT_XACT'
            ORDER BY 
               [Current LSN] DESC;
        """
        cursor = self.cursor_buffered if cursor_buffered is None else cursor_buffered
        cursor.execute(sql_master)
        master_status = cursor.fetchall()
        print("主服务器最新commit lsn：",master_status)
        return master_status
              
####################################
# create the file if it does not exist (orig. line 2025)
# used by init_replica(1), __init_sync(1)
####################################
    def create_file(self, filedir, filename):
        """
            The method creates *filename* (and its parent directory *filedir*)
            if they do not exist yet.

            :param filedir: directory expected to contain the file
            :param filename: full path of the file to create
        """
        # exist_ok avoids the check-then-create race when another process
        # creates the directory between the exists() test and makedirs()
        os.makedirs(filedir, exist_ok=True)
        if not os.path.isfile(filename):
            try:
                # 'x' mode fails if the file appeared meanwhile; that is fine
                open(filename, 'x').close()
            except FileExistsError:
                pass
####################################
# set the max copy memory attribute from the configuration (orig. line 2398)
# used by init_replica(1), __init_sync(4)
####################################
    def set_copy_max_memory(self):
        """
            The method sets the class variable self.copy_max_memory using the
            value stored in the source setting. The configured value is either
            a plain number of bytes (kept as-is) or a number suffixed with
            k/M/G, which is converted to a byte count string. An invalid
            suffix terminates the process with exit code 3.
        """
        raw_value = str(self.source_config["copy_max_memory"])
        copy_max_memory = raw_value[:-1]
        copy_scale = raw_value[-1]
        try:
            int(copy_scale)
        # a bare except here previously swallowed any error; only a
        # non-numeric last character (ValueError) means a unit suffix
        except ValueError:
            if copy_scale == 'k':
                copy_max_memory = str(int(copy_max_memory)*1024)
            elif copy_scale == 'M':
                copy_max_memory = str(int(copy_max_memory)*1024*1024)
            elif copy_scale == 'G':
                copy_max_memory = str(int(copy_max_memory)*1024*1024*1024)
            else:
                print("**FATAL - invalid suffix in parameter copy_max_memory  (accepted values are (k)ilobytes, (M)egabytes, (G)igabytes.")
                os._exit(3)
        else:
            # fully numeric value: keep the configured value untouched
            copy_max_memory = self.source_config["copy_max_memory"]
        self.copy_max_memory = copy_max_memory

###################################
# initialise the PostGIS state (orig. line 2421)
# used by init_replica(1), __init_sync(5)
###################################
    def __init_postgis_state(self):
        """
            Probes the destination for PostGIS and wires the spatial decoders
            accordingly: when PostGIS is present the common spatial datatypes
            are decoded to geometry text, otherwise every spatial datatype
            falls back to hex encoding.
        """
        self.postgis_present = self.pg_engine.check_postgis()
        if not self.postgis_present:
            # no PostGIS: spatial values can only be stored hex-encoded
            self.hexify = self.hexify_always.union(self.postgis_spatial_datatypes)
            for type_name in self.postgis_spatial_datatypes:
                self.decode_map[type_name] = self.__decode_hexify_value
        else:
            self.hexify = self.hexify_always
            self.postgis_spatial_datatypes = self.postgis_spatial_datatypes.union(self.common_spatial_datatypes)
            for type_name in self.common_spatial_datatypes:
                # decode the common spatial datatypes as geometry text
                self.decode_map[type_name] = self.__decode_postgis_spatial_value


###################################
# initialise the configuration and connections for the sqlserver sync (orig. line 2478)
# V1: copied directly from the MySQL implementation; SQL Server specifics not yet addressed
# used by init_replica(1)
###################################
    def __init_sync(self):
        """
            Prepares the class for a synchronisation run: loads the source
            configuration, resolves the output/csv/index/progress paths,
            validates parameter combinations, sizes the copy buffers, probes
            the PostGIS state, opens the source and destination connections
            and finally loads the schema mappings.
        """
        try:  # fetch the configuration of the requested source
            self.source_config = self.sources[self.source]
        except KeyError:
            self.logger.error("The source %s doesn't exists " % (self.source))
            os._exit(0)
        # output directory
        self.out_dir = self.source_config["out_dir"]
        # csv directory: only honoured when the path exists on disk
        try:
            csv_dir = self.source_config["csv_dir"]
            if os.path.exists(csv_dir):
                self.csv_dir = csv_dir
            else:
                self.csv_dir = ""
        except KeyError:
            self.csv_dir = ""
        # flag telling whether the csv files carry a leading column list
        try:
            contains_columns = self.source_config["contain_columns"]
            if type(contains_columns) == bool:
                self.contains_columns = contains_columns
            else:
                self.contains_columns = False
        except KeyError:
            self.contains_columns = False
        # column separator for the csv files, defaults to a comma
        try:
            self.column_split = self.source_config["column_split"]
        except KeyError:
            self.column_split = ","
        # remaining copy settings
        self.copy_mode = self.source_config["copy_mode"]
        self.pg_engine.lock_timeout = self.source_config["lock_timeout"]
        self.pg_engine.grant_select_to = self.source_config["grant_select_to"]
        # index directory, defaulting to ~/.pg_chameleon/index
        try:
            self.index_dir = self.source_config["index_dir"]
            if self.index_dir:
                self.index_dir = os.path.expanduser(self.index_dir)
        except KeyError:
            self.index_dir = None
        if self.index_dir is None:
            self.index_dir = os.path.expanduser('~/.pg_chameleon/index')
        self.index_file = self.index_dir + os.sep + INDEX_FILE
        # create the index file; truncate it unless resuming a previous run
        if not self.only_migration_index and not self.is_create_index:
            self.create_file(self.index_dir, self.index_file)
            if not self.is_skip_completed_tables:
                open(self.index_file, 'w').close()
        
        # progress directory and file
        self.progress_dir = os.path.expanduser('~/.pg_chameleon/progress')
        self.progress_file = self.progress_dir + os.sep + TABLE_PROGRESS_FILE
#        if self.is_skip_completed_tables:
#            self.create_file(self.progress_dir, self.progress_file)
        # flag to keep the existing destination schema instead of recreating it
        if "keep_existing_schema" in self.sources[self.source]:
            self.keep_existing_schema = self.sources[self.source]["keep_existing_schema"]
        else:
            self.keep_existing_schema = False
        # validate mutually-dependent parameters (step 2)
        self.check_param_conflict()
        # slice size (step 3) and max copy memory (step 4)
        self.get_slice_size()
        self.set_copy_max_memory()
        # PostGIS state (step 5)
        self.__init_postgis_state()
        # connect the buffered source cursor and the destination engine
        self.connect_db_buffered()
        self.pg_engine.connect_db()
        # load and propagate the schema mappings
        self.schema_mappings = self.pg_engine.get_schema_mappings()
        self.pg_engine.schema_tables = self.schema_tables

####################################
# get the slice size (orig. line 2546)
# used by init_replica(1), __init_sync(3)
####################################
    def get_slice_size(self):
        """
            Sets self.slice_size from the source configuration, falling back
            to DEFAULT_SLICE_SIZE when the setting is missing or not an
            integer.
        """
        configured = self.source_config.get("slice_size")
        self.slice_size = configured if isinstance(configured, int) else DEFAULT_SLICE_SIZE

####################################
# check parameter conflicts (orig. line 2556)
# used by init_replica(1), __init_sync(2)
####################################
    def check_param_conflict(self):
        """
            Validates mutually-dependent flags: keep_existing_schema requires
            is_create_index to be True and is_skip_completed_tables to be
            False; any other combination terminates the process.
        """
        if not self.keep_existing_schema:
            return
        if not self.is_create_index or self.is_skip_completed_tables:
            self.logger.error("is_create_index must be True and is_skip_completed_tables must be False when keep_existing_schema set True, exit.")
            os._exit(0)

####################################
# base data copy (orig. line 2993)
# the methods it depends on are implemented first
####################################
    def init_replica(self):
        """
            The method performs a full init replica for the given source:
            it initialises the sync state, captures the master coordinates,
            builds the destination schemas and tables, copies the data and
            finally swaps the loading schemas into place. On any failure the
            loading schemas are dropped, the source is flagged as "error" and
            the exception is re-raised.
        """
        # log the start of the init replica for this source
        self.logger.debug("starting init replica for source %s" % self.source)
        # step 1: initialise the sync state (database independent)
        self.__init_sync()
        print("开始执行基础复制")
        # step 2: conversion map initialisation (currently disabled)
        # self.__init_convert_map()
        # step 3: check the SQL Server configuration (to be improved)
        self.check_sqlserver_config()
        # step 4: check the lower_case_table_names setting (runs SQL)
        print("检查数据库信息开始")
        self.check_lower_case_table_names()
        # step 5: capture the master start coordinates (reused in several places)
        print("获取数据库信息开始")
        master_start = self.get_master_coordinates()
        print("获取数据库信息完成")
        print(master_start)
        # check that the openGauss (B-compatible) database exists
        self.pg_engine.check_b_database()
        # flag the source as "initialising"
        self.pg_engine.set_source_status("initialising")
        # clear any stale batch data
        self.pg_engine.clean_batch_data()
        # persist the master status captured in master_start
        self.pg_engine.sqlserver_save_master_status(master_start)
        # clean up the source tables registry
        self.pg_engine.cleanup_source_tables()
        # the list of schemas to replicate
        self.schema_list = [schema for schema in self.schema_mappings]
        # step 6: build the table include/exclude lists
        self.__build_table_exceptions()
        # step 7: build the table list (runs SQL)
        self.get_table_list()
        # step 8 (disabled): migration progress bootstrap when skipping completed tables
        # if self.is_skip_completed_tables:
        #     self.init_migration_progress_var()
        # step 9: create the destination schemas
        self.create_destination_schemas()
        try:
            # store the source timings
            self.pg_engine.insert_source_timings()
            # hand the loading-schema mappings to the destination engine
            self.pg_engine.schema_loading = self.schema_loading
            if self.keep_existing_schema:
                # disconnect the buffered source connection
                self.disconnect_db_buffered()
                # step 11: copy the table data
                self.__copy_tables()
            else:
                # step 12: drop leftover destination tables
                self.drop_destination_tables()
                # step 13: create the destination tables
                self.create_destination_tables()
                # disconnect the buffered source connection
                self.disconnect_db_buffered()
                # step 15: copy the table data
                self.__copy_tables()
                # step 16: grant SELECT to the configured roles
                self.pg_engine.grant_select()
                # step 17: swap the loading schemas with the destination ones
                self.pg_engine.swap_schemas()
                # step 18: drop the loading schemas
                self.drop_loading_schemas()
            # (disabled) truncate the progress file
            # if self.is_skip_completed_tables:
            #     open(self.progress_file, 'w').close()
            # flag the source as "initialised"
            self.pg_engine.set_source_status("initialised")
            # reconnect the buffered source connection
            self.connect_db_buffered()
            # capture the master end coordinates
            master_end = self.get_master_coordinates()
            # disconnect the buffered source connection
            self.disconnect_db_buffered()
            # (disabled) set the source high watermark without consistency
            #self.pg_engine.set_source_highwatermark(master_end, consistent=False)
            # notify the completion of the init replica
            notifier_message = "init replica for source %s is complete" % self.source
            self.notifier.send_message(notifier_message, 'info')
            self.logger.info(notifier_message)
            print("===============init_replica END!======================")
        # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt;
        # plausibly intended so cleanup always runs, since the exception is
        # re-raised afterwards — confirm before narrowing to Exception
        except:
            self.drop_loading_schemas()
            self.pg_engine.set_source_status("error")
            notifier_message = "init replica for source %s failed" % self.source
            self.logger.critical(notifier_message)
            self.notifier.send_message(notifier_message, 'critical')
            raise


####################################
# database object replication (orig. line 3084)
# the methods it depends on are implemented first
####################################
    def start_database_object_replica(self, db_object_type):
        """
        Start a database object's replication from SQL Server to the destination.

        For every mapped schema the object names are fetched from the source,
        optionally pre-registered in the shared progress json, and each object
        is then created on the destination via create_single_object.

        :param db_object_type: the database object type, refer to enumeration class DBObjectType
        """
        self.logger.info("Starting the %s replica for source %s." % (db_object_type.value, self.source))
        self.__init_sync()

        # sql_to_get_object_metadata is the SQL Server side query that returns the names of
        # the database objects of this type within a given schema, e.g. for views:
        # SELECT v.name AS OBJECT_NAME FROM sys.views v INNER JOIN sys.schemas s
        #   ON v.schema_id = s.schema_id WHERE s.name = N'%s';
        sql_to_get_object_metadata = db_object_type.sql_to_get_object_metadata_sqlserver()
        # needed so that `user`@`host` style names are accepted by openGauss
        self.pg_engine.pgsql_conn.execute("set b_compatibility_user_host_auth to on;")

        for schema in self.schema_mappings:
            self.logger.info("Starting the %s replica for schema %s." % (db_object_type.value, schema))
            # get metadata (object name) of all objects on schema
            if self.dump_json:
                self.cursor_buffered.execute(sql_to_get_object_metadata % (schema,))
                # pre-register every object as PENDING in the shared progress dict
                # NOTE(review): managerJson appears to be a shared module-level dict - confirm
                for object_metadata in self.cursor_buffered.fetchall():
                    managerJson.update({object_metadata["OBJECT_NAME"]: {
                        "name": object_metadata["OBJECT_NAME"],
                        "status": process_state.PENDING_STATUS,
                        "percent": process_state.PRECISION_START,
                        "error": ""
                    }})

            # re-run the query: the fetchall above consumed the cursor
            self.cursor_buffered.execute(sql_to_get_object_metadata % (schema,))
            self.create_single_object(db_object_type, schema)

    # creates the database objects of one type on the destination database
    def create_single_object(self, db_object_type, schema):
        """
        The method creates every object of db_object_type found in schema.

        For each object the SQL Server DDL is fetched and replayed on openGauss:
        first verbatim (method 1); if that fails, the statement is run through
        the sql-translator (method 2) and executed again. Successes and
        failures are recorded via add_object_success / add_object_fail.

        :param: db_object_type: the database object type, refer to enumeration class DBObjectType
        :param: schema: the schema name
        """
        # SQL Server side query (EXEC sp_helptext @objname = N`%s`.`%s`;) that
        # returns the DDL of a given database object
        sql_to_get_create_object_statement = db_object_type.sql_to_get_create_object_statement_sqlserver()
        success_num = 0  # number of replication success records
        failure_num = 0  # number of replication fail records

        # hint appended to the error when the failure is caused by a missing openGauss role
        info_message = ". PLEASE create openGauss role!!! FIRST：set b_compatibility_user_host_auth to on; SECOND：create user `XXX`@`XXX` with password 'XXXXXX';(Attention: `` not '') THIRD: grant all privileges to `XXX`@`XXX`;"
        # execute directly or translate each source object's DDL statement
        for object_metadata in self.cursor_buffered.fetchall():
            object_name = object_metadata["OBJECT_NAME"]

            # get the details required to create the object
            self.cursor_buffered.execute(sql_to_get_create_object_statement % (schema, object_name))
            create_object_metadata = self.cursor_buffered.fetchone()
            # create_object_statement is the DDL that creates the database object
            create_object_statement = self.__get_create_object_statement(create_object_metadata, db_object_type)
            if db_object_type == DBObjectType.PROC or db_object_type == DBObjectType.FUNC:
                if not create_object_statement.endswith(";"):
                    create_object_statement += ";"
            try:
                # Method 1: directly replay the source DDL on openGauss
                tran_create_object_statement = self.__get_tran_create_object_statement(db_object_type,
                                                                                       schema,
                                                                                       create_object_statement)
                success_num = self.add_object_success(create_object_statement, db_object_type, object_name, schema,
                                                      success_num, tran_create_object_statement)
            except Exception as exp:
                # NOTE(review): exp.code / exp.message are driver-specific attributes - confirm
                self.logger.warning("Method 1 directly execute create %s %s.%s failed, error code "
                                  "is %s and error message is %s, so translate it according to sql-translator"
                                  % (db_object_type.value, schema, object_name, exp.code, exp.message))

                total_error_message = "Method 1 execute failed: %s" % exp.message
                is_user_not_exist = False
                if exp.code == USER_NOT_EXIST_ERROR_CODE:
                    is_user_not_exist = True

                # Method 2: run the source DDL through the sql-translator to get openGauss DDL
                # translate sql dialect in mysql format to opengauss format.
                stdout, stderr = self.sql_translator.sqlserver_to_opengauss(create_object_statement)
                if "java: command not found" in stderr:
                    total_error_message += "; " + "Method 2 parse sql failed: No java environment for running sql-translator, %s" % stderr.strip()

                    if is_user_not_exist:
                        total_error_message += info_message
                    failure_num = self.add_object_fail(create_object_statement, db_object_type, total_error_message,
                                                       failure_num, object_name)
                    continue

                tran_create_object_statement = self.__get_tran_create_object_statement(db_object_type, schema,
                                                                                       stdout)
                has_error, error_message = self.__unified_log(stderr)
                if has_error:
                    # if translation has any error, this replication also fail
                    # insert a failure record into the object replication status table
                    total_error_message += "; " + "Method 2 parse sql failed: %s" % error_message
                    if is_user_not_exist:
                        total_error_message += info_message
                    failure_num = self.add_object_fail(create_object_statement, db_object_type, total_error_message,
                                                       failure_num, object_name)
                    continue

                # if translate successful, add the corresponding database object to opengauss
                try:
                    success_num = self.add_object_success(create_object_statement, db_object_type, object_name,
                                                          schema, success_num, tran_create_object_statement)
                except Exception as exception:
                    total_error_message += "; " + "Method 2 execute failed: %s" % exception.message
                    if is_user_not_exist or exception.code == USER_NOT_EXIST_ERROR_CODE:
                        total_error_message += info_message
                    failure_num = self.add_object_fail(create_object_statement, db_object_type, total_error_message,
                                                       failure_num, object_name)
        self.logger.info("Complete the %s replica for schema %s, total %d, success %d, fail %d." % (
            db_object_type.value, schema, success_num + failure_num, success_num, failure_num))


    def __create_indices(self):
        """
        Build indices and primary keys on the copied tables.

        For every table the DDL is generated on the SQL Server side from
        sys.indexes / sys.index_columns and executed against the loading
        schema. Primary keys are additionally registered via
        pg_engine.store_table.
        """
        sql_get_idx = """
            SELECT
                CASE
                    WHEN ind.is_primary_key = 1 THEN
                        'ALTER TABLE ' + OBJECT_SCHEMA_NAME(idx.object_id) + '.' + OBJECT_NAME(idx.object_id) +
                        ' ADD CONSTRAINT ' + isx.name + ' PRIMARY KEY (' + COL_NAME(ic.object_id,ic.column_id) + ')'
                    ELSE
                        'CREATE INDEX ' + idx.name + ' ON ' + OBJECT_SCHEMA_NAME(idx.object_id) + '.' + OBJECT_NAME(idx.object_id) +
                        ' (' + COL_NAME(ic.object_id,ic.column_id) + ')'
                END AS ddl_text,
                CASE
                    WHEN ind.is_primary_key = 1 THEN
                        'primary key on ' + OBJECT_NAME(idx.object_id)
                    ELSE
                        'index ' + idx.name + ' on ' + OBJECT_NAME(idx.object_id)
                END AS ddl_msg,
                CASE
                    WHEN ind.is_primary_key = 1 THEN
                        1
                    ELSE
                        0
                END AS table_pk
            FROM
                sys.indexes idx
            INNER JOIN
                sys.index_columns ic
            ON
                idx.object_id = ic.object_id AND idx.index_id = ic.index_id
            LEFT JOIN
                sys.key_constraints isx
            ON
                idx.name = isx.name AND idx.object_id = isx.parent_object_id
            INNER JOIN
                sys.indexes ind
            ON
                ind.object_id = idx.object_id AND ind.index_id = idx.index_id
            WHERE
                idx.object_id = OBJECT_ID('%s.%s')
        """

        for schema in self.schema_tables:
            table_list = self.schema_tables[schema]
            for table in table_list:
                loading_schema = self.schema_loading[schema]["loading"]
                destination_schema = self.schema_loading[schema]["destination"]
                self.pg_engine.pgsql_conn.settings['search_path'] = loading_schema
                # OBJECT_ID expects 'schema_name.object_name'; the arguments were
                # previously interpolated as (table, schema), which produced
                # 'table.schema' and resolved no object at all
                stmt = self.pg_engine.pgsql_conn.prepare(sql_get_idx % (schema, table))
                idx_tab = stmt()
                for idx in idx_tab:
                    self.logger.info('Adding %s', (idx[1]))
                    try:
                        self.pg_engine.pgsql_conn.execute(idx[0])
                    # narrowed from a bare except: so KeyboardInterrupt/SystemExit propagate
                    except Exception:
                        self.logger.error("an error occurred when executing %s" % (idx[0]))
                    if idx[2]:
                        # NOTE(review): ['foo'] looks like a placeholder pkey column list - confirm
                        self.pg_engine.store_table(destination_schema, table, ['foo'], None)


    def add_object_fail(self, create_object_statement, db_object_type, total_error_message, failure_num, object_name):
        """
        Record one failed object replication and return the updated failure count.

        :param create_object_statement: the source DDL that could not be replayed
        :param db_object_type: DBObjectType member describing the object kind
        :param total_error_message: accumulated error text from both attempts
        :param failure_num: running count of failed objects
        :param object_name: name of the object that failed
        :return: failure_num incremented by one
        """
        self.pg_engine.insert_object_replicate_record(object_name, db_object_type, create_object_statement)
        object_kind = db_object_type.value
        self.logger.error("Two methods execute create %s %s failed, error message is %s" %
                          (object_kind, object_name, total_error_message))
        self.logger.error("Copying the source object fail %s : %s" % (object_kind, object_name))
        if self.dump_json:
            self.__copied_progress_json(object_kind, object_name, process_state.FAIL_STATUS,
                                        total_error_message)
        return failure_num + 1

    # registers a successfully replicated database object on the openGauss side
    def add_object_success(self, create_object_statement, db_object_type, object_name, schema, success_num,
                           tran_create_object_statement):
        """
        Create the object on openGauss, record the replication and return the
        updated success count.

        :param create_object_statement: original SQL Server DDL
        :param db_object_type: DBObjectType member describing the object kind
        :param object_name: name of the replicated object
        :param schema: source schema name (mapped through schema_mappings)
        :param success_num: running count of successful objects
        :param tran_create_object_statement: DDL adapted for openGauss
        :return: success_num incremented by one
        """
        try:
            mapped_schema = self.schema_mappings[schema]
            self.pg_engine.add_object(object_name, mapped_schema, tran_create_object_statement)
        except KeyError:
            # no mapping for this schema: skip creation but still record it
            pass
        self.pg_engine.insert_object_replicate_record(object_name, db_object_type, create_object_statement,
                                                      tran_create_object_statement)
        self.logger.info("Copying the source object success %s : %s" % (db_object_type.value, object_name))
        if self.dump_json:
            self.__copied_progress_json(db_object_type.value, object_name, process_state.PRECISION_SUCCESS)
        return success_num + 1

    def __get_tran_create_object_statement(self, db_object_type, schema, stdout):
        """
        Adapt a translated DDL statement before executing it on openGauss.

        The raw translator output cannot run as-is: trailing '/' terminators
        are stripped for procedures/functions and source schema references are
        rewritten to the mapped destination schema.

        :param db_object_type: DBObjectType member describing the object kind
        :param schema: source schema name
        :param stdout: raw statement emitted by the translator (or original DDL)
        :return: the adapted statement
        """
        statement = stdout
        if db_object_type not in DBObjectType:
            return statement
        if db_object_type in (DBObjectType.PROC, DBObjectType.FUNC):
            # drop a trailing '/' delimiter, if any
            statement = re.sub(r"/[\s]*$", "", statement)
        try:
            # remap plain and backquoted schema qualifiers to the destination schema
            mapped = self.schema_mappings[schema]
            statement = statement.replace(schema + ".", mapped + ".") \
                .replace("`" + schema + "`.", "`" + mapped + "`.")
        except KeyError:
            pass
        return statement

    def __unified_log(self, stderr):
        """
        Embed og-translator's log records into chameleon's log system.

        :param stderr: the translator's stderr output, one log record per line
        :return: [has_error, error_message]; has_error is True when any record
                 carries the ERROR level, error_message collects those records
        """
        has_error = False  # a sign of whether the translation is successful
        # the log format on og-translator is: %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{100} - %msg%n
        error_message = ["Translate failed by og-translator"]
        for log in stderr.splitlines():
            log_split = log.split(' ')
            if len(log_split) > LOG_LEVEL_INDEX:
                # logging.getLevelName("ERROR") returns the int 40 == logging.ERROR,
                # so a named level at token 3 maps to its numeric value; any other
                # string maps to "Level %s" (a str), caught by the isinstance check below
                level_name = logging.getLevelName(log_split[LOG_LEVEL_INDEX])
                # NOTE(review): token 1 is the time field in the layout documented above;
                # this secondary parse presumably targets records of a different shape
                # where the level sits at index 1 - confirm
                parse_error_level_name = logging.getLevelName(log_split[1])
                if level_name == logging.ERROR:
                    # when level_name value is ERROR
                    # it means there is an error log record in the translation, maybe the sql statement
                    # cannot be translated
                    # when level_name could not be got
                    # it means there is a problem with the project og-translator itself
                    has_error = True
                    self.logger.error(log)
                    error_message.append(log)
                elif parse_error_level_name == logging.ERROR:
                    has_error = True
                    self.logger.error(log)
                    error_message.append(log)
                elif not isinstance(level_name, int):
                    # unrecognized level string: fall back to stdout
                    print(log)
                else:
                    self.logger.log(level_name, log)
            else:
                # line does not match the expected layout: fall back to stdout
                print(log)
        return [has_error, error_message]

    def __get_create_object_statement(self, create_object_metadata, db_object_type):
        """
        Get the sql required to create the object, in SQL Server dialect format.

        :param create_object_metadata: row fetched for the object; its
               "definition" column holds the object's DDL
        :param db_object_type: DBObjectType member describing the object kind
        :return: the creation DDL, or an empty string for unsupported types
        """
        # VIEW, TRIGGER, PROC and FUNC all store their DDL in the "definition"
        # column, so the former four identical if/elif branches collapse to one
        if db_object_type in (DBObjectType.VIEW, DBObjectType.TRIGGER,
                              DBObjectType.PROC, DBObjectType.FUNC):
            create_object_statement = create_object_metadata["definition"]
        else:
            create_object_statement = ""
        # normalize "elseif" to the openGauss-friendly "else if"
        create_object_statement = create_object_statement.replace("elseif", "else if")
        return create_object_statement
    
    ########################### __copy_tables helpers ###########

    def __copy_tables(self):
        """
            The method copies the data between tables, from the mysql schema to the corresponding
            postgresql loading schema. Before the copy starts the table is locked and then the lock is released.
            If keep_existing_schema is true for the source then the tables are truncated before the copy,
            the indices are left in place and a REINDEX TABLE is executed after the copy.
        """
        print("===============开始表复制======================")
        # remove stale csv slices left over from previous runs
        self.delete_csv_file()
        # get retry count, default is 3
        self.retry = self.__get_count("retry", RETRY_COUNT, True)
        # build the inter-process queues used by the reader/writer pools
        self.__before_copy_tables()
        # dump table and column metadata to csv files
        self.generate_metadata_statement()
        # NOTE(review): presumably launches the companion datacheck processes - confirm
        self.__start_with_datacheck_process()
        # fan the per-table read tasks out to the worker pools
        self.__exec_copy_tables_tasks(self.__build_all_tasks())
        if self.with_datacheck:
            self.__delete_failedtable_record()
        # replay tables whose copy failed, up to the retry budget
        self.__retry_tasks(self.read_retry_queue)
        if self.with_datacheck:
            self.wait_for_finish(self.with_datacheck_processes)


    def __retry_tasks(self, tasks_queue):
        """
        Retry the copy of tables whose previous attempt failed.

        Drains tasks_queue, truncates the target tables and deletes their csv
        slices, replays the copy, then recurses on the refreshed retry queue
        until it is empty or the retry budget is exhausted.

        :param tasks_queue: queue of failed ReadDataTask objects
        """
        if tasks_queue.empty() or self.retry == 0:
            self.logger.info("[RETRY] no need enter retry now: retry=%d, tasks: %s" % (
                self.retry,
                "empty" if tasks_queue.empty() else "not_empty"))
            return
        self.retry -= 1
        tasks_list = []
        while not tasks_queue.empty():
            tasks_list.append(tasks_queue.get(block=True))
        self.logger.info("[RETRY] failed table tasks received, retry count is %d. task:%s" % (self.retry, len(tasks_list)))
        self.truncate_table_and_delete_csv(tasks_list)
        self.__before_copy_tables()
        self.__exec_copy_tables_tasks(tasks_list)
        if self.with_datacheck:
            self.__delete_failedtable_record()
        # the former `if self.retry < 0 / else` performed this identical call in
        # both branches; recurse on the retry queue that __before_copy_tables
        # just rebuilt (termination is handled by the guard at the top)
        self.__retry_tasks(self.read_retry_queue)
            
    def truncate_table_and_delete_csv(self, retry_table_task_list):
        """
        Truncate the previously failed tables and drop their csv slices so the
        next retry starts from a clean state.

        :param retry_table_task_list: list of failed ReadDataTask objects
        """
        self.logger.info('[RETRY] begin to delete failed tables')
        try:
            self.pg_engine.connect_db()
        except BaseException:
            self.logger.error('[RETRY] build new connect to og failed!')
            raise
        for retry_task in retry_table_task_list:
            target_table = retry_task.table
            target_loading = retry_task.loading_schema
            self.logger.info('[RETRY] delete %s.%s' % (target_loading, target_table))
            try:
                self.pg_engine.pgsql_conn.execute("truncate table %s.%s" % (target_loading, target_table))
            except Exception as exp:
                # a failed truncate is logged but does not abort the cleanup loop
                self.logger.error("truncate table %s.%s failed and the error message is %s"
                                  % (target_loading, target_table, exp.message))
            self.delete_table_csv_file(retry_task.schema, target_table)
        self.pg_engine.disconnect_db()
        self.logger.info('[RETRY] end to delete failed tables')

    def delete_table_csv_file(self, schema, table):
        """
            Delete every csv slice file produced for one table.

            :param schema: the schema name
            :param table: the table name
        """
        data_dir = self.out_dir + os.sep + CSV_DATA_SUB_DIR
        if not os.path.exists(data_dir):
            return
        # slice files are named "<schema>_<table>_slice<N>..."
        slice_prefix = "%s_%s_slice" % (schema, table)
        for entry in os.listdir(data_dir):
            if entry.startswith(slice_prefix):
                os.remove(data_dir + os.sep + entry)

    
    def __delete_failedtable_record(self):
        """
        Drop writer-log records that belong to tables queued for retry.

        The writer log queue feeds the datacheck side; records whose
        schema/table pair matches a task in the retry queue are removed so
        slices that will be re-copied are not validated prematurely.
        """
        # signal the datacheck side that a cleanup pass is in progress
        self.delete_failedtable.set(True)
        self.delete_failedtable_lock.acquire()
        log_size = self.writer_log_queue.qsize()
        while log_size > 0:
            log_size -= 1
            log_record = self.writer_log_queue.get()
            if log_record is None:
                # keep the shutdown sentinel in the queue
                self.writer_log_queue.put(None)
                continue
            schema = log_record.get("schema")
            table = log_record.get("table")
            retry_size = self.read_retry_queue.qsize()
            reput_record = True
            # scan the retry queue by rotating its items through get/put
            while retry_size > 0:
                retry_size -= 1
                task = self.read_retry_queue.get()
                if schema == task.schema and table == task.table:
                    # matching retry task found: drop this log record
                    reput_record = False
                    self.read_retry_queue.put(task)
                    break
                self.read_retry_queue.put(task)
            if reput_record:
                self.writer_log_queue.put(log_record)
        self.delete_failedtable_lock.release()
        self.delete_failedtable.set(False)

    # build one read task per table listed in schema_tables
    def __build_all_tasks(self):
        """
        Create one ReadDataTask for every table in every configured schema.

        :return: list of ReadDataTask objects, in schema_tables iteration order
        """
        all_tasks = []
        for schema, table_list in self.schema_tables.items():
            mapping = self.schema_loading[schema]
            all_tasks.extend(
                ReadDataTask(destination_schema=mapping["destination"],
                             loading_schema=mapping["loading"],
                             schema=schema, table=table)
                for table in table_list)
        return all_tasks

    def __exec_copy_tables_tasks(self, tasks_lists):
        """
        Run the given ReadDataTask list through the reader and writer pools.

        Readers dump table slices to csv, writers COPY them into openGauss;
        a None sentinel appended to the task list shuts each pool down.

        :param tasks_lists: list of ReadDataTask objects to execute
        """
        self.logger.info('begin to exec copy table tasks')
        readers = self.__get_count("readers", 8)
        writers = self.__get_count("writers", 8)
        writer_pool = []
        reader_pool = []
        self.init_workers(readers, self.data_reader, reader_pool, "reader-")
        self.init_workers(writers, self.data_writer, writer_pool, "writer-")
        # trailing None is the shutdown sentinel for the reader pool
        tasks_lists.append(None)
        for task in tasks_lists:
            self.read_task_queue.put(task, block=True)
        self.wait_for_finish(reader_pool)
        reader_pool.clear()
        if self.with_datacheck and self.__no_need_retry():
            self.reader_log_queue.put(None)

        # move deferred index tasks to the writers before their sentinel
        while not self.index_waiting_queue.empty():
            self.write_task_queue.put(self.index_waiting_queue.get(block=True), block=True)
        self.write_task_queue.put(None, block=True)
        self.wait_for_finish(writer_pool)
        writer_pool.clear()

        if self.with_datacheck and self.__no_need_retry():
            self.writer_log_queue.put(None)
            self.write_csv_finish.set(True)

    def __no_need_retry(self):
        """Return True when nothing is queued for retry or the retry budget is spent."""
        if self.retry == 0:
            return True
        return self.read_retry_queue.qsize() == 0

    def init_workers(self, count, target_func, pool, worker_prefix):
        """
        Spawn *count* daemon worker processes running *target_func*, naming
        them "<worker_prefix>1" .. "<worker_prefix>count", and register each
        in *pool* before starting it.

        :param count: number of processes to spawn
        :param target_func: callable executed by each process
        :param pool: list that collects the Process objects
        :param worker_prefix: prefix used to build each process name
        """
        for worker_id in range(1, count + 1):
            worker = multiprocessing.Process(target=target_func)
            worker.daemon = True
            worker.name = worker_prefix + str(worker_id)
            pool.append(worker)
            worker.start()


    def data_reader(self):
        """Reader worker entry point: consume ReadDataTask items from the read queue."""
        self.execute_task(task_queue=self.read_task_queue)

    def data_writer(self):
        """
        Writer worker entry point.

        Each writer process owns a private pg_engine connection and consumes
        CopyDataTask / CreateIndexTask items from the write queue until the
        sentinel is seen; the connection is closed on exit.
        """
        writer_engine = self.get_new_engine()
        writer_engine.connect_db()
        writer_engine.set_source_status("initialising")
        writer_engine.open_b_compatibility_mode()
        self.execute_task(task_queue=self.write_task_queue, engine=writer_engine)
        writer_engine.disconnect_db()
    
    def get_new_engine(self):
        """
        Build a fresh pg_engine that shares this source's configuration, for
        use by a worker process (each writer needs its own connection).

        :return: a new, not-yet-connected pg_engine
        """
        template = self.pg_engine
        fresh = pg_engine()
        fresh.dest_conn = template.dest_conn
        fresh.logger = template.logger
        fresh.source = template.source
        fresh.full = template.full
        fresh.type_override = template.type_override
        fresh.sources = template.sources
        fresh.notifier = template.notifier
        fresh.migrate_default_value = template.migrate_default_value
        # -1 marks a non-mariadb source; otherwise propagate the source version
        fresh.sqlserver_version = -1 if self.is_mariadb == False else self.version
        fresh.index_parallel_workers = template.index_parallel_workers
        return fresh


    def wait_for_finish(self, processes):
        """
        Block until every still-running process in *processes* has terminated.

        :param processes: iterable of multiprocessing.Process-like objects
        """
        running = [worker for worker in processes if worker.is_alive()]
        for worker in running:
            worker.join()

    def execute_task(self, task_queue, engine=None):
        """
        Worker loop: consume tasks from task_queue until the None sentinel.

        :param task_queue: the queue this worker drains
        :param engine: a pg_engine when called from a writer process, None for readers
        """
        while True:
            task = task_queue.get(block=True)
            if task is None:
                # re-publish the sentinel so sibling workers also terminate
                task_queue.put(task, block=True)
                break
            if engine is not None:
                # this is the data_writer process
                if isinstance(task, CopyDataTask):
                    self.copy_table_data(task, engine)
                elif isinstance(task, CreateIndexTask):
                    self.create_index_process(task, engine)
                else:
                    self.logger.error("unknown write task type")
            # this is the data_reader process
            elif isinstance(task, ReadDataTask):
                self.read_data_process(task)
                print("read_data_process完成")
            else:
                self.logger.error("unknown read task type")


    def copy_table_data(self, task, writer_engine):
        """
        Load one csv slice of a table into its loading schema via COPY.

        In "file" copy mode task.csv_file is a path opened for binary reading;
        in "direct" mode it is used as-is (presumably an in-memory file-like
        object built by the reader side - confirm). On COPY failure the slice
        falls back to row-by-row insert statements.

        :param task: CopyDataTask describing the slice to load
        :param writer_engine: the pg_engine owned by this writer process
        """
        if self.copy_mode == "direct":
            csv_file = task.csv_file
        elif self.copy_mode == "file":
            csv_file = open(task.csv_file, 'rb')
        else:
            self.logger.warning("unknown copy mode, use \'direct\' mode")
            csv_file = task.csv_file

        if csv_file is None:
            self.logger.warning("this is an empty csv file, you should check your batch for errors")
            return
        count_rows = task.count_rows
        print(count_rows)
        schema = task.schema
        table = task.table
        select_columns = task.select_columns
        total_rows = count_rows["table_rows"]
        copy_limit = DATA_NUM_FOR_A_SLICE_CSV
        avg_row_length = int(count_rows["avg_row_length"])
        loading_schema = self.schema_loading[schema]["loading"]
        column_list = select_columns["column_list"]
        column_list_select = select_columns["column_list_select"]
        # defensive guard: copy_limit was just assigned a non-zero constant above
        if copy_limit == 0:
            copy_limit = DATA_NUM_FOR_A_SLICE_CSV
        num_slices = int(total_rows // copy_limit)
        range_slices = list(range(num_slices + 1))
        total_slices = len(range_slices)
        task_slice = task.slice
        contain_columns = task.contain_columns
        column_split = task.column_split
        copy_data_from_csv = True
        # slice == -1 is a marker task used only for datacheck bookkeeping
        if self.with_datacheck and task.slice == -1:
            self.put_writer_record("SLICE", task)
            return

        try:
            writer_engine.copy_data(csv_file, loading_schema, table, column_list, contain_columns, column_split)
            self.put_writer_record("SLICE", task)
            if self.dump_json:
                percent = 1.0 if (task_slice + 1) > total_slices else (task_slice + 1) / total_slices
                self.__copied_progress_json("table", table, percent)
                self.__copy_total_progress_json(copy_limit, avg_row_length)
        except Exception as exp:
            # NOTE(review): exp.code / exp.message are driver-specific attributes - confirm
            if self.dump_json:
                self.__copied_progress_json("table", table, process_state.FAIL_STATUS, exp.message)
            self.logger.error("SQLCODE: %s SQLERROR: %s" % (exp.code, exp.message))
            self.logger.info("Table %s.%s error in copy csv mode, saving slice number for the fallback to "
                             "insert statements" % (loading_schema, table))
            copy_data_from_csv = False
        finally:
            # NOTE(review): in "direct" mode this assumes task.csv_file exposes close() - confirm
            csv_file.close()
            if self.copy_mode == "file" and not self.with_datacheck:
                try:
                    remove(task.csv_file)
                except Exception as exp:
                    self.logger.error("remove csv file failed %s and the exp message is %s"
                                      % (task.csv_file, exp.message))
            del csv_file
            gc.collect()
        self.print_progress(task_slice + 1, total_slices, schema, table)
        if self.is_skip_completed_tables and copy_data_from_csv:
            self.handle_migration_progress(schema, table)
        elif not copy_data_from_csv:
            # fallback path: replay this slice with INSERT statements
            ins_arg = {"slice_insert": task_slice, "table": table, "schema": schema,
                       "select_stat": select_columns["select_stat"], "column_list": column_list,
                       "column_list_select": column_list_select, "copy_limit": copy_limit}
            self.insert_table_data(ins_arg)
            self.put_writer_record("SLICE", task)

    def put_writer_record(self, record_type, task, index_status=None, contains_index=None):
        """
            Push a replication bookkeeping record onto the writer log queue.
            No-op unless with_datacheck is enabled.

            :param record_type: "SLICE" or "INDEX"
            :param task: CopyDataTask or CreateIndexTask
            :param index_status: create index status (INDEX records only)
            :param contains_index: whether the table contains an index (INDEX records only)
        """
        if not self.with_datacheck:
            return
        if record_type == "SLICE":
            record = {"type": "SLICE", "schema": task.schema, "table": task.table,
                      "name": os.path.basename(task.csv_file),
                      "no": task.slice + 1, "total": None,
                      "beginIdx": task.idx_pair.first, "endIdx": task.idx_pair.second,
                      "slice": True}
            self.writer_log_queue.put(record)
        if record_type == "INDEX":
            record = {"type": "INDEX", "schema": task.schema, "table": task.table,
                      "timestamp": strftime('%Y-%m-%d %H:%M:%S', localtime()),
                      "indexStatus": index_status, "containsIndex": contains_index}
            self.writer_log_queue.put(record)

    def __copied_progress_json(self, type_val, name, value, error=""):
        """
        Advance the shared progress entry for *name* when *value* is ahead of it.

        :param type_val: kind of object tracked (not referenced here, kept for callers)
        :param name: key of the progress entry in managerJson
        :param value: new percent value, or a failure marker
        :param error: error text appended to any previously recorded error
        """
        if managerJson[name]["percent"] >= value:
            return
        if process_state.is_precision_success(value):
            next_status = process_state.ACCOMPLISH_STATUS
        elif process_state.is_fail_status(value):
            next_status = process_state.FAIL_STATUS
        else:
            next_status = process_state.PROCESSING_STATUS
        recorded_error = managerJson[name]["error"]
        if len(recorded_error) > 0:
            error = recorded_error + error
        managerJson.update({name: {
            "name": name,
            "status": next_status,
            "percent": value,
            "error": error
        }})

    def __copy_total_progress_json(self, record, avg_row_length):
        """
        Accumulate overall migration throughput into the shared progress dicts.

        :param record: number of rows accounted for by the slice just finished
        :param avg_row_length: average row size in bytes, used to estimate volume
        """
        origin_record = totalRecord["totalRecord"]
        total_record = origin_record + record
        origin_data = totalData["totalData"]
        total_data = origin_data + record * avg_row_length
        # cumulative volume in MB, two decimals
        data = format(total_data / BYTE_TO_MB_CONVERSION, '.2f')
        start_time = INITIAL_TIME["initialTime"]
        tt = time.time()
        current_time = int(tt)
        migration_time = current_time - start_time
        if migration_time > 0:
            # MB per second, two decimals
            speed = format(total_data / migration_time / BYTE_TO_MB_CONVERSION, '.2f')
        else:
            # NOTE(review): int 0 here vs a formatted str above - consumers
            # presumably accept both; confirm before normalizing
            speed = 0

        totalRecord.update({"totalRecord": total_record})
        totalData.update({"totalData": total_data})
        managerJson.update({"total": {
            "record": total_record,
            "data": data,
            "time": migration_time,
            "speed": speed
        }})



    def delete_csv_file(self):
        """
            Reset the csv working area: remove everything under the chameleon
            metadata directory and recreate the csv data directory.
        """
        # local import: shutil is only needed here and keeps the file's
        # top-of-file import block untouched
        import shutil

        csv_file_dir = self.out_dir + os.sep + CSV_DATA_SUB_DIR
        chameleon_dir = self.out_dir + os.sep + CSV_META_SUB_DIR
        if os.path.exists(csv_file_dir):
            # previously `os.system("rm -rf <dir>/*")`: shelling out broke on
            # paths with spaces/special chars and was non-portable. Remove each
            # entry inside the chameleon dir (the dir itself is kept); unlike
            # the glob, this also removes dot-files.
            for entry in os.listdir(chameleon_dir):
                entry_path = chameleon_dir + os.sep + entry
                if os.path.isdir(entry_path):
                    shutil.rmtree(entry_path, ignore_errors=True)
                else:
                    try:
                        os.remove(entry_path)
                    except OSError:
                        pass
        os.makedirs(csv_file_dir)
    
    def __get_count(self, key, default_val, enable_less_than_zero=False):
        """
        Fetch an integer setting from source_config.

        :param key: configuration key to look up
        :param default_val: returned when the key is absent, or when the value
                            is negative and negatives are disallowed
        :param enable_less_than_zero: when True, negative values are returned as-is
        """
        if key not in self.source_config:
            return default_val
        configured = int(self.source_config[key])
        if enable_less_than_zero:
            return configured
        return configured if configured >= 0 else default_val

    def __before_copy_tables(self):
        """
        Create the inter-process queues used during table copy.

        The write queue is bounded by copy_max_memory so readers cannot
        produce csv slices faster than writers drain them.
        """
        # one Manager (one background server process) serves all four queues;
        # the previous code spawned a separate Manager per queue
        manager = multiprocessing.Manager()
        size = int(int(self.copy_max_memory) / DEFAULT_MAX_SIZE_OF_CSV)
        self.write_task_queue = manager.Queue(max(size, MINIUM_QUEUE_SIZE))
        self.read_task_queue = manager.Queue()
        self.read_retry_queue = manager.Queue()
        self.index_waiting_queue = manager.Queue()

    def generate_metadata_statement(self):
        """
            The method generates metadata info for every schema and stores it
            to csv files. Failures are logged, never raised (best effort).
        """
        # initialized before the try: if reader_cursor_manager() itself raises,
        # the finally block previously hit an UnboundLocalError on cursor_manager
        cursor_manager = None
        try:
            cursor_manager = reader_cursor_manager(self)
            cursor_buffered = cursor_manager.cursor_buffered
            for schema in self.schema_list:
                self.write_metadata_file(schema, cursor_buffered)
        except Exception as exp:
            self.logger.error(exp)
            self.logger.info("generate metadata statement failed.")
        finally:
            if cursor_manager is not None:
                cursor_manager.close()
            self.logger.warning("close connection for get metadata statement.")

    def write_metadata_file(self, schema, cursor_buffered):
        """
            Collect the table and column metadata for every table of the given
            schema and append it to the schema's metadata csv files.

            :param schema: the schema whose tables are processed
            :param cursor_buffered: buffered cursor on the SQL Server connection
        """
        # estimated row count of a table, summed over all its partitions
        sql_rows = """
           SELECT
               SUM(p.rows) AS table_rows
           FROM
               sys.tables t
           JOIN
               sys.partitions p ON t.object_id = p.object_id
           WHERE
               t.name = %s
               AND t.schema_id = SCHEMA_ID(%s);
        """
        meta_dir = self.out_dir + os.sep + CSV_META_SUB_DIR + os.sep
        table_metadata_file = meta_dir + "%s_information_schema_tables.csv" % schema
        column_metadata_file = meta_dir + "%s_information_schema_columns.csv" % schema
        self.logger.info("start write meta information to file.")
        with open(table_metadata_file, 'a') as fw_table, open(column_metadata_file, 'a') as fw_column:
            for table in self.schema_tables[schema]:
                cursor_buffered.execute(sql_rows, (table, schema))
                row_info = cursor_buffered.fetchone()
                self.generate_table_metadata_statement(schema, table, row_info.get("table_rows"), fw_table, cursor_buffered)
                self.generate_column_metadata_statement(schema, table, fw_column, cursor_buffered)
        self.logger.info("finish write meta information to file.")


    # Fetch the table level metadata
    def generate_table_metadata_statement(self, schema, table, table_count, fw_table, cursor=None):
        """
            Append one TableMetadataTask json line describing the table and
            whether it participates in any key constraint.

            :param schema: source schema name
            :param table: source table name
            :param table_count: estimated row count of the table
            :param fw_table: open file object the json line is appended to
            :param cursor: optional cursor; self.cursor_buffered is used when None
        """
        self.logger.info("start write table metadata for `%s`.`%s`." % (schema, table))
        contain_primary_key_select = """
            SELECT COUNT(*) AS COUNT
            FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE 
            WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s

        """
        active_cursor = self.cursor_buffered if cursor is None else cursor
        active_cursor.execute(contain_primary_key_select, (schema, table))
        contain_primary_key = active_cursor.fetchone()['COUNT']
        # flag is 1 when the table has at least one key column, else 0
        has_key_flag = 1 if contain_primary_key != 0 else 0
        task = TableMetadataTask(schema, table, table_count, has_key_flag)
        fw_table.write(json.dumps(task.__dict__, ensure_ascii=False) + os.linesep)
        self.logger.info("finish write table metadata for `%s`.`%s`." % (schema, table))


    # SQL Server's INFORMATION_SCHEMA.COLUMNS has no COLUMN_KEY column, unlike MySQL.
    def generate_column_metadata_statement(self, schema, table, fw_column, cursor=None):
        """
            Append one ColumnMetadataTask json line per column of the table.

            SQL Server's INFORMATION_SCHEMA.COLUMNS has no COLUMN_KEY column,
            so an empty string is selected in its place to keep the task
            layout uniform with the MySQL variant.

            :param schema: source schema name
            :param table: source table name
            :param fw_column: open file object the json lines are appended to
            :param cursor: optional cursor; self.cursor_buffered is used when None
        """
        self.logger.info("start write column metadata for `%s`.`%s`." % (schema, table))
        # fix: the original statement was missing the FROM keyword and did not
        # select COLUMN_KEY, which made the fetch loop below raise KeyError
        column_metadata_select = """
        SELECT 
            COLUMN_NAME, 
            ORDINAL_POSITION, 
            DATA_TYPE AS COLUMN_TYPE,
            '' AS COLUMN_KEY
        FROM
            INFORMATION_SCHEMA.COLUMNS
        WHERE 
            TABLE_SCHEMA = %s
            AND TABLE_NAME = %s"""
        if cursor is None:
            cursor = self.cursor_buffered
        cursor.execute(column_metadata_select, (schema, table))
        column_metadata = cursor.fetchall()
        for a_column_metadata in column_metadata:
            task = ColumnMetadataTask(schema, table, a_column_metadata["COLUMN_NAME"], a_column_metadata["ORDINAL_POSITION"],
                                        a_column_metadata["COLUMN_TYPE"], a_column_metadata["COLUMN_KEY"])
            fw_column.write(json.dumps(task.__dict__, ensure_ascii=False) + os.linesep)
        self.logger.info("finish write column metadata for `%s`.`%s`." % (schema, table))


    def __start_with_datacheck_process(self):
        """
            Spawn the daemon helper processes used when running together with
            datacheck: the reader/writer log writers and the csv file janitor.
            No-op unless with_datacheck is enabled.
        """
        if not self.with_datacheck:
            return
        self.logger.info('begin to start with_datacheck process')
        self.with_datacheck_processes = []
        self.init_with_datacheck_var()
        worker_specs = (
            (self.process_reader_logger, "reader-log-process"),
            (self.process_writer_logger, "writer-log-process"),
            (self.process_csv_file, "csv-file-processor"),
        )
        for target, proc_name in worker_specs:
            worker = multiprocessing.Process(target=target, name=proc_name, daemon=True)
            worker.start()
            self.with_datacheck_processes.append(worker)
        self.logger.info('with_datacheck process started')
    
    def __get_csvdir_space(self, csv_file_dir):
        """
            Return the available space of the filesystem hosting csv_file_dir, in KB.

            Shells out to ``df -h`` and takes the fourth column (available space)
            of the second output line; the human readable value (e.g. ``12G``)
            is converted to KB by __format_space_in_kb.
            NOTE(review): assumes GNU/Linux ``df`` output layout — verify on
            other platforms before relying on this.
        """
        total_space = os.popen("df -h " + csv_file_dir + " | awk {'print $4'}").read().split(os.linesep)[1]
        return self.__format_space_in_kb(total_space)  

    def __format_space_in_kb(self, space):
        """
            Convert a human readable size string as printed by df/du
            (e.g. '8K', '500M', '12G', '1T') into kilobytes.

            :param space: size string whose last character is the unit letter
            :return: the size in KB, 0.0 when the unit is not recognised
            :rtype: float
        """
        multipliers = {
            'K': 1,
            'M': 1024,
            'G': 1024 * 1024,
            'T': 1024 * 1024 * 1024,
        }
        factor = multipliers.get(space[-1:].upper())
        if factor is None:
            return 0.0
        return float(space[:-1]) * factor
    


    def init_with_datacheck_var(self):
        """
        Initialize the shared state used when cooperating with datacheck: the
        reader/writer log queues, the slice bookkeeping dict, the flow-control
        flags and the two thresholds driving flow control.

        csv_files_threshold falls back to half the process file-descriptor
        limit and csv_dir_space_threshold falls back to half the free space
        of the csv directory when the configured value is missing or not an
        integer.
        """
        self.logger.info("start to init with_datacheck variables.")
        # one shared manager (kept on self so its server process stays alive)
        # instead of a fresh Manager per object
        manager = multiprocessing.Manager()
        self.with_datacheck_manager = manager
        self.reader_log_queue = manager.Queue()
        self.writer_log_queue = manager.Queue()
        self.total_slice_dict = manager.dict()
        self.stop_data_reader = manager.Value(bool, False)
        self.write_csv_finish = manager.Value(bool, False)
        self.delete_failedtable = manager.Value(bool, False)
        self.delete_failedtable_lock = manager.Lock()
        csv_file_dir = self.out_dir + os.sep + CSV_DATA_SUB_DIR
        try:
            csv_files_threshold_config = self.source_config["csv_files_threshold"]
            if isinstance(csv_files_threshold_config, int):
                self.csv_files_threshold = csv_files_threshold_config
            else:
                self.csv_files_threshold = int(int(os.popen("ulimit -n").read().strip()) / 2)
                self.logger.warn("parameter csv_files_threshold config wrong," + 
                "set csv_files_threshold to " + str(self.csv_files_threshold))
        except KeyError:
            self.csv_files_threshold = int(int(os.popen("ulimit -n").read().strip()) / 2)
            self.logger.warn("parse parameter csv_files_threshold failed," + 
                "set csv_files_threshold to " + str(self.csv_files_threshold))
        try:
            csv_dir_space_config = self.source_config["csv_dir_space_threshold"]
            if isinstance(csv_dir_space_config, int):
                self.csv_dir_space_threshold = csv_dir_space_config * 1024 * 1024
            else:
                self.csv_dir_space_threshold = self.__get_csvdir_space(csv_file_dir) / 2
                # fix: only warn when the configured value is actually invalid;
                # previously this warning also fired for a valid int config
                self.logger.warn("parameter csv_dir_space_threshold config wrong," + 
                    "set csv_dir_space_threshold to " + str(self.csv_dir_space_threshold / (1024 * 1024)) + "GB")
        except KeyError:
            self.csv_dir_space_threshold = self.__get_csvdir_space(csv_file_dir) / 2
            self.logger.warn("parse parameter csv_dir_space_threshold failed, " +
                "set csv_dir_space_threshold to " + str(self.csv_dir_space_threshold / (1024 * 1024)) + "GB")
        self.logger.info("Finish initing with_datacheck variables.")

    def process_reader_logger(self):
        """
        Drain reader_log_queue into <out_dir>/chameleon/reader.log, one json
        record per line. A None record marks the end of the stream: the
        sentinel string "finished" is written and the loop stops.
        Runs as a daemon process when cooperating with datacheck.
        """
        reader_log_path = self.out_dir + os.sep + CSV_META_SUB_DIR + os.sep + "reader.log"
        if os.path.isfile(reader_log_path):
            # start every run from an empty log file
            open(reader_log_path, 'w').close()
        with open(reader_log_path, 'a') as freader:
            for log_record in iter(self.reader_log_queue.get, None):
                freader.write(json.dumps(log_record, ensure_ascii=False) + os.linesep)
                freader.flush()
            freader.write("finished")

    def process_writer_logger(self):
        """
        Drain writer_log_queue into <out_dir>/chameleon/writer.log.
        Writing pauses while delete_failedtable is set (a failed table is
        being cleaned up) and stops once process_writer_log_queue reports
        that the finished sentinel has been written.
        Runs as a daemon process when cooperating with datacheck.
        """
        writer_log_path = self.out_dir + os.sep + CSV_META_SUB_DIR + os.sep + "writer.log"
        if os.path.isfile(writer_log_path):
            # start every run from an empty log file
            open(writer_log_path, 'w').close()
        with open(writer_log_path, 'a') as fwriter:
            finished = False
            while not finished:
                if self.delete_failedtable.get():
                    time.sleep(1)
                else:
                    finished = self.is_write_log_finished(fwriter)
        
    def is_write_log_finished(self, fwriter):
        """
        Process one writer-log record while holding delete_failedtable_lock,
        so log draining cannot interleave with failed-table cleanup.

        :param fwriter: open file object of writer.log
        :return: True when the finished sentinel has been written
        :rtype: bool
        """
        with self.delete_failedtable_lock:
            return self.process_writer_log_queue(fwriter)
        
    def process_writer_log_queue(self, fwriter):
        """
        Consume one record from writer_log_queue and append it to writer.log.

        A None record signals that all writers finished: the queue is drained,
        each remaining record is completed with its table's total slice count,
        and the "finished" sentinel is written.

        A SLICE record whose total is not known yet (the reader has not
        published it in total_slice_dict) is pushed back onto the queue and
        the method backs off for two seconds.

        :param fwriter: open file object of writer.log
        :return: True when the finished sentinel has been written, else False
        :rtype: bool
        """
        log_record = self.writer_log_queue.get()
        if log_record is None:
            # end of stream: flush everything still queued, enriching each
            # record with the total slice count of its table
            while not self.writer_log_queue.empty():
                log_record = self.writer_log_queue.get()
                total_slice = self.total_slice_dict.get('`%s`.`%s`' % (log_record.get("schema"), log_record.get("table")))
                log_record.update({"total": total_slice, "slice":(total_slice > 1)})
                fwriter.write(json.dumps(log_record, ensure_ascii=False) + os.linesep)
            fwriter.write("finished")
            fwriter.flush()
            return True
        else:
            if log_record.get("type") == "SLICE" and log_record.get("total") is None:
                total_slice = self.total_slice_dict.get('`%s`.`%s`' % (log_record.get("schema"), log_record.get("table")))
                if total_slice is None:
                    # the reader has not published the total yet: requeue the
                    # record and let the caller retry after a short backoff
                    self.writer_log_queue.put(log_record)
                    time.sleep(2)
                    return False
                log_record.update({"total": total_slice, "slice":(total_slice > 1)})
            fwriter.write(json.dumps(log_record, ensure_ascii=False) + os.linesep)
            fwriter.flush()
        return False

    def process_csv_file(self):
        """
        Janitor loop for the csv data directory when cooperating with
        datacheck: removes the .check marker files dropped by datacheck and
        toggles the reader flow-control flag depending on file count and
        disk usage. Exits once csv writing has finished and the directory
        is empty. Runs as a daemon process.
        """
        csv_file_dir = self.out_dir + os.sep + CSV_DATA_SUB_DIR
        while True:
            remaining_files = []
            for file_name in os.listdir(csv_file_dir):
                if file_name.endswith(".check"):
                    os.remove(csv_file_dir + os.sep + file_name)
                else:
                    remaining_files.append(file_name)
            # count the remaining files directly from the listing instead of
            # shelling out to "ls | wc -l"
            file_nums = len(remaining_files)
            if self.write_csv_finish.get() and file_nums == 0:
                break
            limit_reader = self.is_limit_reader(file_nums, csv_file_dir)
            if limit_reader and not self.stop_data_reader.get():
                self.stop_data_reader.set(True)
            elif not limit_reader and self.stop_data_reader.get():
                self.stop_data_reader.set(False)
            time.sleep(2)

    def is_limit_reader(self, file_nums, csv_file_dir):
        """
        Decide whether the reader processes must be throttled when
        cooperating with datacheck.

        :param file_nums: number of files currently in csv_file_dir
        :param csv_file_dir: csv file path
        :return: True when the file count reaches csv_files_threshold or the
            csv storage usage is beyond its threshold
        :rtype: bool
        """
        if file_nums >= self.csv_files_threshold:
            return True
        return self.is_storage_beyond(csv_file_dir)
    
    def is_storage_beyond(self, csv_file_dir):
        """
        The method judge whether the csv storage space exceeds the threshold

        :param csv_file_dir: csv file path
        :return: return True if csv storage space exceeds the threshold else False
        :rtype: bool
        """
        # "du -sh" prints a human readable total (e.g. "3.4G"); it is converted
        # to KB before comparing with csv_dir_space_threshold (also in KB).
        # NOTE(review): assumes a POSIX shell with du/awk available.
        used_storage = os.popen("du -sh " +  csv_file_dir + " | awk {'print $1'}").read().strip()
        used_storage_in_kb = self.__format_space_in_kb(used_storage)
        return used_storage_in_kb >= self.csv_dir_space_threshold
    def print_progress(self, iteration, total, schema, table):
        """
            Log the copy progress as "slice i of n". To keep the info log
            quiet, tables copied in a single slice are logged at debug level
            only.

            :param iteration: the slice number currently processed
            :param total: the estimated total slices
            :param schema: the schema name
            :param table: the table name
        """
        # the estimate can undershoot; never report more slices done than total
        total = max(total, iteration)
        message = "Table %s.%s copied %s slice of %s" % (schema, table, iteration, total)
        if total > 1:
            self.logger.info(message)
        else:
            self.logger.debug(message)


    def handle_migration_progress(self, schema, table, total_slices = -1):
        """
            Record migration progress for a table and flush the progress file
            once the whole table has been migrated.

            When total_slices is non-negative it is stored as the table's
            total slice count; otherwise one completed slice is appended.

            :param schema: schema
            :param table: table
            :param total_slices: total slice number, -1 to record one completed slice
        """
        table_key = '`%s`.`%s`' % (schema, table)
        if total_slices < 0:
            self.table_completed_slice_dict[table_key].append(1)
        else:
            self.table_slice_num_dict[table_key].value = total_slices
        if self.check_table_completed(schema, table):
            self.flush_progress_file(schema, table)
            
    def insert_table_data(self, ins_arg):
        """
            This method is a fallback procedure whether copy_table_data fails.
            The ins_args is a list with the informations required to run the select for building the insert
            statements and the slices's start and stop.
            The process is performed in memory and can take a very long time to complete.

            :param ins_arg: the list with the insert arguments (slice_insert, schema, table, select_stat,
                column_list, column_list_select, copy_limit)
        """
        slice_insert = ins_arg["slice_insert"]
        table = ins_arg["table"]
        schema = ins_arg["schema"]
        select_stat = ins_arg["select_stat"]
        column_list = ins_arg["column_list"]
        column_list_select = ins_arg["column_list_select"]
        copy_limit = ins_arg["copy_limit"]
        loading_schema = self.schema_loading[schema]["loading"]
        # order by the first column so OFFSET/FETCH pagination is deterministic
        first_column = column_list.split(',')[0].strip('[]')
        conn_unbuffered = self.get_connect()
        cursor_unbuffered = conn_unbuffered.cursor()
        try:
            self.logger.info("Executing inserts in %s.%s. Slice %s. Rows per slice %s." % (
            loading_schema, table, slice_insert, copy_limit,))
            offset = slice_insert * copy_limit
            sql_fallback = "SELECT {} FROM {}.{} ORDER BY {} OFFSET {} ROWS FETCH NEXT {} ROWS ONLY;".format(select_stat,schema,table, first_column, offset,copy_limit)
            cursor_unbuffered.execute(sql_fallback)
            insert_data = cursor_unbuffered.fetchall()
            self.pg_engine.insert_data(loading_schema, table, insert_data, column_list, column_list_select)
            if self.is_skip_completed_tables:
                self.handle_migration_progress(schema, table)
        finally:
            # fix: always release the fallback connection, even on failure;
            # leftover debug print() calls and an unused params tuple removed
            cursor_unbuffered.close()
            conn_unbuffered.close()
    
    def create_index_process(self, task, writer_engine):
        """
            Apply the indices of a CreateIndexTask on the writer side, then
            store the table in the replica catalogue.

            Tables without indices are stored immediately. When
            keep_existing_schema is set, the destination's own indices and
            constraints are collected, dropped and re-created around a
            truncate instead of creating the source indices.

            :param task: the CreateIndexTask to process
            :param writer_engine: the pg_engine instance of the writer process
        """
        if task.indices is None:
            self.logger.error("index data is None")
            return
        if len(task.indices) == 0:
            self.logger.info("there are no indices be created, just store the table, tablename: "
                            + '`%s`.`%s`' % (task.schema, task.table))
            writer_engine.store_table(task.destination_schema, task.table, [], task.master_status)
            self.put_writer_record("INDEX", task, "None", False)
            if self.is_skip_completed_tables:
                self.handle_migration_progress(task.schema, task.table)
            return
        table = task.table
        destination_schema = task.destination_schema
        schema = task.schema
        if self.only_migration_index:
            loading_schema = destination_schema
        else:
            loading_schema = self.schema_loading[schema]["loading"]
        master_status = task.master_status
        try:
            if self.keep_existing_schema:
                table_pkey = writer_engine.get_existing_pkey(destination_schema, table)
                self.logger.info("Collecting constraints and indices from the destination table  %s.%s" % (
                    destination_schema, table))
                writer_engine.collect_idx_cons(destination_schema, table)
                self.logger.info("Removing constraints and indices from the destination table  %s.%s" % (
                    destination_schema, table))
                writer_engine.cleanup_idx_cons(destination_schema, table)
                writer_engine.truncate_table(destination_schema, table)
            else:
                self.put_writer_record("INDEX", task, "START", True)
                table_pkey = writer_engine.create_indices1(loading_schema, table, task.indices,
                                                          task.is_parallel_create_index)
                self.put_writer_record("INDEX", task, "END", True)
                writer_engine.create_auto_increment_column(loading_schema, table, task.auto_increment_column)

            writer_engine.store_table(destination_schema, table, table_pkey, master_status)
            if self.keep_existing_schema:
                self.logger.info(
                    "Adding constraint and indices to the destination table  %s.%s" % (destination_schema, table))
                writer_engine.create_idx_cons(destination_schema, table)
            if self.is_skip_completed_tables:
                self.handle_migration_progress(schema, table)
        except Exception as exp:
            # fix: the original bare except hid the failure cause and also
            # swallowed KeyboardInterrupt/SystemExit
            self.logger.error("create index or constraint error")
            self.logger.error(exp)

    def read_data_process(self, task):
        """
            Reader worker entry point for a single table: copies the table
            data (from the source table or a pre-generated csv), then builds
            the matching CreateIndexTask and routes it to the writers. On any
            failure the task is requeued on read_retry_queue; the reader
            connection is always closed.

            :param task: the ReadDataTask describing the table to copy
        """
        cursor_manager = reader_cursor_manager(self)
        destination_schema = task.destination_schema
        loading_schema = task.loading_schema
        schema = task.schema
        table = task.table
        self.logger.warning("create connection for table %s.%s" % (schema, table))

        self.logger.info("Copying the source table %s into %s.%s" % (table, loading_schema, table))
        try:
            # fix: leftover debug print after read_data_from_table removed
            if self.is_read_data_from_csv(schema, table):
                master_status, is_parallel_create_index = self.read_data_from_csv(schema, table, cursor_manager)
            else:
                master_status, is_parallel_create_index = self.read_data_from_table(schema, table, cursor_manager)
            indices = self.__get_index_data(schema=schema, table=table, cursor_buffered=cursor_manager.cursor_buffered)
            auto_increment_column = self.__get_auto_increment_column(schema, table, cursor_manager.cursor_buffered)
            index_write_task = CreateIndexTask(table, schema, indices, destination_schema, master_status,
                                            is_parallel_create_index, auto_increment_column)
            if not self.is_create_index:
                self.write_indextask_to_file(index_write_task)
            elif self.with_datacheck:
                self.write_task_queue.put(index_write_task)
            else:
                # delay index creation while readers are still busy: keep up
                # to <readers> index tasks parked in index_waiting_queue
                readers = self.source_config["readers"] if self.source_config["readers"] > 0 else 1
                if self.index_waiting_queue.qsize() > readers:
                    self.write_task_queue.put(self.index_waiting_queue.get(block=True))
                self.index_waiting_queue.put(index_write_task, block=True)
        except BaseException as exp:
            self.logger.error(exp)
            self.logger.info("Could not copy the table %s. Excluding it from the replica." % (table))
            self.read_retry_queue.put(task, block=True)
        finally:
            self.__safe_close(cursor_manager, 'cursor_manager')
            self.logger.warning("close connection for table %s.%s" % (schema, table))

    def __safe_close(self, resource, desc='', func_name='close'):
        """
            Close a resource defensively: invoke resource.<func_name>() when
            the resource exists and has such a method, logging (never raising)
            any error.

            :param resource: the object to close, may be None
            :param desc: description used in the warning message
            :param func_name: name of the close method to invoke
        """
        try:
            if resource is not None and hasattr(resource, func_name):
                getattr(resource, func_name)()
        except Exception as exp:
            # fix: typo "exption" in the log message
            self.logger.warning("safe_close %s get exception: %s" % (desc, exp))
    
    def is_read_data_from_csv(self, schema, table):
        """
            The method judge copy the data from the table or csv file.

            :param schema: the origin's schema
            :param table: the table name
            :return: True when <csv_dir>/<schema>_<table>.csv exists
            :rtype: bool
        """
        origin_csv_path = os.path.join(self.csv_dir, "%s_%s.csv" % (schema, table))
        return os.path.exists(origin_csv_path)
        
    def write_indextask_to_file(self, task):
        """
            Append one json line describing a CreateIndexTask to the index
            file, so index creation can be replayed later by
            read_indextask_from_file.

            :param task: the CreateIndexTask to persist
        """
        index_list = [
            task.indices,
            task.destination_schema,
            task.master_status,
            task.is_parallel_create_index,
            task.auto_increment_column,
        ]
        index_line = {'`%s`.`%s`' % (task.schema, task.table): index_list}
        with open(self.index_file, 'a') as fw_index:
            fw_index.write(json.dumps(index_line) + '\n')
        
    
    def read_indextask_from_file(self):
        """
            The method read index task from index_file and write to write_task_queue.
        """
        # map of '`schema`.`table`' -> [indices, destination_schema,
        # master_status, is_parallel_create_index, auto_increment_column],
        # i.e. the json lines produced by write_indextask_to_file
        index_task_dict = {}
        with open(self.index_file, 'r') as fr_index:
            while True:
                # [:-1] strips the trailing '\n' each line was written with;
                # readline() returns '' at EOF, which also makes task_line
                # falsy and ends the loop
                task_line = fr_index.readline()[:-1]
                if task_line:
                    single_task_dict = json.loads(task_line)
                    for key, value in single_task_dict.items():
                        index_task_dict[key] = value
                else:
                    break
        for schema in self.schema_mappings:
            for table in self.schema_tables[schema]:
                index_infos = index_task_dict.get('`%s`.`%s`' % (schema, table))
                if index_infos is None:
                    self.logger.error('does not have indexinfo of `%s`.`%s` in index_file.' % (schema, table))
                    continue
                index_write_task = CreateIndexTask(table, schema, index_infos[0], index_infos[1],
                                                    index_infos[2], index_infos[3], index_infos[4])
                self.write_task_queue.put(index_write_task, block=True)
        # None is the end-of-stream sentinel for the writer processes
        self.write_task_queue.put(None, block=True)
        
    def __get_auto_increment_column(self, schema, table, cursor_buffered):
        """
            Return the identity (auto increment) columns of a table.

            :param schema: source schema name
            :param table: source table name
            :param cursor_buffered: buffered cursor on the SQL Server connection
            :return: rows with table_schema, table_name, column_name, column_type
        """
        sql_column = """
                    SELECT
                        SCHEMA_NAME(t.schema_id) AS table_schema,
                        t.name AS table_name,
                        c.name AS column_name,
                        TYPE_NAME(c.user_type_id) AS column_type
                    FROM
                        sys.tables t
                    INNER JOIN
                       sys.columns c ON t.object_id = c.object_id
                    WHERE
                        t.name = %s
                        AND SCHEMA_NAME(t.schema_id) = %s
                        AND c.is_identity = 1
                    ;
                """
        # fix: the placeholders bind (t.name, SCHEMA_NAME) so the parameters
        # must be (table, schema); they were previously passed swapped
        cursor_buffered.execute(sql_column, (table, schema))
        auto_increment_column = cursor_buffered.fetchall()
        return auto_increment_column

    def __get_index_data(self, schema, table, cursor_buffered):
        """
            Return the clustered/nonclustered/XML indices of a table. Each row
            carries index_name, index_type, the non_unique flag, an (empty)
            index_comment, the comma separated column list and a NULL sub_part
            to mirror the MySQL metadata layout.

            :param schema: source schema name
            :param table: source table name
            :param cursor_buffered: buffered cursor on the SQL Server connection
            :return: list of index description rows
        """
        sql_index = """
                    SELECT
                        i.name AS index_name,
                        i.type_desc AS index_type,
                        CASE WHEN i.is_unique = 0 THEN 1 ELSE 0 END AS non_unique,
                        '' AS index_comment, 
                        (
                            SELECT
                                STRING_AGG(c.name, ', ') WITHIN GROUP (ORDER BY ic.index_column_id)
                            FROM
                                sys.index_columns ic
                                INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
                            WHERE
                                ic.object_id = i.object_id
                                AND ic.index_id = i.index_id
                        ) AS index_columns,
                        NULL AS sub_part
                    FROM
                        sys.indexes i
                    WHERE
                        i.object_id = OBJECT_ID(%s + '.' + %s)
                        AND (i.type_desc = 'CLUSTERED' OR i.type_desc = 'NONCLUSTERED' OR i.type_desc = 'XML')
                    ;
                """
        cursor_buffered.execute(sql_index, (schema, table))
        index_data = cursor_buffered.fetchall()
        # fix: leftover debug print(index_data) removed
        return index_data

    def read_data_from_table(self, schema, table, cursor_manager):
        """
            Copy one source table into csv slices and enqueue a CopyDataTask
            per slice for the writer processes.

            The row count and average row length are estimated from the SQL
            Server partition statistics; the table is read in a single
            transaction and fetched in copy_limit sized batches.

            :param schema: source schema name
            :param table: source table name
            :param cursor_manager: holder of the buffered/unbuffered cursors
            :return: [master_status, is_parallel_create_index]
        """
        self.logger.debug("Reading data from table according to select query")
        self.logger.debug("Estimating rows in %s.%s" % (schema, table))

        sql_rows = """
            SELECT SUM(rows) as table_rows
            FROM sys.partitions p
            INNER JOIN sys.tables t ON p.object_id = t.object_id
            INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
            WHERE s.name = %s
            AND t.name = %s;
        """

        cursor_buffered = cursor_manager.cursor_buffered
        cursor_unbuffered = cursor_manager.cursor_unbuffered

        cursor_buffered.execute(sql_rows, (schema, table))
        count_rows = cursor_buffered.fetchone()
        # SUM() returns NULL for a table without partition rows
        total_rows = count_rows["table_rows"] or 0
        sql_pages = """
            SELECT SUM(ps.in_row_data_page_count) AS total_in_row_data_pages
            FROM sys.dm_db_partition_stats AS ps
            INNER JOIN sys.tables AS t ON ps.object_id = t.object_id
            INNER JOIN sys.schemas AS s ON t.schema_id = s.schema_id
            WHERE s.name = %s
            AND t.name = %s;
        """
        cursor_buffered.execute(sql_pages, (schema, table))
        in_row_data_page_count = cursor_buffered.fetchone()
        total_pages = in_row_data_page_count["total_in_row_data_pages"] or 0
        page_size = 8192  # SQL Server data pages are 8KB
        total_data_size = total_pages * page_size
        # fix: guard the division so empty tables (total_rows 0 or NULL) do
        # not raise; leftover debug print() calls removed throughout
        avg_row_length = total_data_size / total_rows if total_rows else 0
        count_rows["avg_row_length"] = avg_row_length

        copy_limit = 0
        if self.with_datacheck:
            copy_limit = self.slice_size
        if copy_limit == 0:
            copy_limit = DATA_NUM_FOR_A_SLICE_CSV

        total_slices = int(total_rows // copy_limit) + 1
        self.logger.debug("The table %s.%s will be copied in %s estimated slice(s) of %s rows"
                          % (schema, table, total_slices, copy_limit))
        # Lock the table and flush the cache
        self.lock_table(schema, table, cursor_buffered)
        # Get master status
        self.logger.debug("Collecting the master's coordinates for table `%s`.`%s`" % (schema, table))
        master_status = self.get_master_coordinates(cursor_buffered)
        is_parallel_create_index = total_rows >= DATA_NUM_FOR_PARALLEL_INDEX
        pk_column = None
        if self.with_datacheck:
            pk_column = self.get_pk_column(schema, table, cursor_unbuffered)

        select_columns = self.generate_select_statements(schema, table, cursor_buffered, pk_column)
        sql_csv = self.generate_sql_csv(schema, table, select_columns, pk_column)

        self.logger.debug("Executing query for table %s.%s" % (schema, table))

        task_slice = 0
        copydatatask_list = []
        self.flow_control(schema, table)
        with self.reader_xact(self, cursor_buffered):
            cursor_buffered.execute(sql_csv)
            while True:
                out_file = '%s/%s/%s_%s_slice%d.csv' % (self.out_dir, CSV_DATA_SUB_DIR, schema, table, task_slice + 1)
                try:
                    csv_results = cursor_buffered.fetchmany(copy_limit)
                except Exception as exp:
                    self.logger.error("Exception caught in fetchmany: %s" % exp)
                    raise

                if len(csv_results) == 0:
                    break

                # each row carries a pre-rendered csv line in its 'data' field;
                # NUL bytes are stripped as PostgreSQL COPY rejects them
                csv_data = ("\n".join([str(d['data']) for d in csv_results])).replace('\x00', '')
                if self.copy_mode == 'file':
                    csv_file = codecs.open(out_file, 'wb', self.charset, buffering=-1)
                    csv_file.write(csv_data)
                    csv_file.close()
                    task = CopyDataTask(out_file, count_rows, table, schema, select_columns, len(csv_results),
                                        task_slice)
                else:
                    if self.copy_mode != 'direct':
                        self.logger.warning("Unknown copy mode, using direct mode instead")
                    csv_file = io.BytesIO()
                    csv_file.write(csv_data.encode())
                    csv_file.seek(0)
                    task = CopyDataTask(csv_file, count_rows, table, schema, select_columns, len(csv_results), task_slice)

                if self.with_datacheck:
                    if pk_column:
                        # first/last pk value of the slice, consumed by datacheck
                        task.idx_pair = Pair(csv_results[0][1], csv_results[-1][1])
                    copydatatask_list.append(task)

                self.write_task_queue.put(task, block=True)
                task_slice += 1
                self.logger.info("Table %s.%s generated %s slice of %s" % (schema, table, task_slice, total_slices))

            if self.is_skip_completed_tables:
                self.handle_migration_progress(schema, table, task_slice)
        self.__add_tableslices_to_reader(schema, table, task_slice, copydatatask_list, count_rows, select_columns)
        return [master_status, is_parallel_create_index]

    def __add_tableslices_to_reader(self, schema, table, task_slice, copydatatask_list, count_rows, select_columns):
        # Publish the table's total slice count and emit one SLICE record per
        # generated csv slice to the reader log queue (datacheck mode only).
        # For an empty table (task_slice == 0) an empty placeholder csv is
        # created so datacheck still sees the table.
        # NOTE(review): task.idx_pair is only assigned in read_data_from_table
        # when a pk column exists — presumably CopyDataTask provides a default
        # Pair; verify before relying on beginIdx/endIdx here.
        if self.with_datacheck:
            self.total_slice_dict['`%s`.`%s`' % (schema, table)] = task_slice
            if task_slice == 0:
                csv_file = '%s/%s/%s_%s_slice%d.csv' % (self.out_dir, CSV_DATA_SUB_DIR, schema, table, task_slice + 1)
                task = CopyDataTask(csv_file, count_rows, table, schema, select_columns, 0, -1)
                open(csv_file, 'w').close()
                self.write_task_queue.put(task)
                copydatatask_list.append(task)
            for task in copydatatask_list:
                self.reader_log_queue.put({"type":"SLICE","schema":schema,"table":table,"name":os.path.basename(task.csv_file),
                    "no":task.slice + 1,"total":task_slice,"beginIdx":task.idx_pair.first,"endIdx":task.idx_pair.second,"slice":task_slice > 1})
  
    
    # Use an inner class to represent transactions
    class reader_xact:
        """
        Context manager representing one SQL Server transaction: begins the
        transaction on entry and commits it on exit, delegating to the outer
        reader object's begin_tx/end_tx.
        """
        def __init__(self, outer_obj, cursor):
            # outer_obj: the enclosing reader exposing begin_tx/end_tx
            self.outer_obj = outer_obj
            self.cursor = cursor

        def __enter__(self):
            self.outer_obj.begin_tx(self.cursor)
            # Return the manager so `with ... as tx:` works
            # (the original returned None, which broke the `as` form).
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            # NOTE(review): commits unconditionally, even when an exception
            # escaped the with-block — consider rolling back when exc_type
            # is not None; preserved as-is to avoid a behavior change here.
            self.outer_obj.end_tx(self.cursor)

    def begin_tx(self, cursor):
        """
        Start an explicit transaction in SQL Server.

        :param cursor: the cursor on which the BEGIN statement is issued
        """
        begin_statement = "BEGIN TRANSACTION;"
        self.logger.debug("setting isolation level and beginning transaction")
        cursor.execute(begin_statement)
    def end_tx(self, cursor):
        """
        Commit the current SQL Server transaction.

        :param cursor: the cursor on which the COMMIT statement is issued
        """
        commit_statement = "COMMIT TRANSACTION;"
        self.logger.debug("committing transaction")
        cursor.execute(commit_statement)


    
    def flow_control(self, schema, table):
        """
        Block the reader process while the data-check flow control flag is set.

        :param schema: the origin's schema
        :param table: the table name
        """
        # Fast path: nothing to do unless data check asked the reader to pause.
        if not (self.with_datacheck and self.stop_data_reader.get()):
            return
        announced = False
        # Poll once per second until the data check releases the reader.
        while self.stop_data_reader.get():
            if not announced:
                self.logger.info("read for `%s`.`%s` stopped, wait for datacheck......" % (schema, table))
                announced = True
            time.sleep(1)
        self.logger.info("flow control for `%s`.`%s` finished, read process start." % (schema, table))


                
    

    def generate_sql_csv(self, schema, table, select_columns, pk_column=None):
        """
        Build the SELECT statement that produces one CSV-formatted row per
        table row for SQL Server.

        :param schema: the origin's schema
        :param table: the table name
        :param select_columns: dictionary produced by generate_select_statements
        :param pk_column: optional single-column primary key name
        :return: the SELECT statement text
        :rtype: str
        """
        # NOTE(review): schema/table/pk_column are interpolated into the SQL
        # text; they are expected to come from trusted configuration.
        sql_csv = "SELECT (%s) as data FROM [%s].[%s];" %\
                  (select_columns.get("select_csv"), schema, table)

        # With data check enabled and a usable PK, also project the PK value
        # and order by it so slice boundaries are stable.
        if self.with_datacheck and pk_column:
            sql_csv = "SELECT (%s) as data, (%s) as pk_data FROM [%s].[%s] ORDER BY [%s];" %\
                      (select_columns.get("select_csv"), select_columns.get("select_pk"), schema, table, pk_column)

        return sql_csv


    def get_pk_column(self, schema, table, cursor=None):
        """
        Return the primary-key column name of ``schema.table`` in SQL Server,
        or None when the table has no primary key or a composite one.

        :param schema: the origin's schema
        :param table: the table name
        :param cursor: optional cursor; falls back to self.cursor_buffered
        :return: the single PK column name, or None
        """
        # Query the system catalog views for the primary-key index columns.
        sql_pkname = """
           SELECT c.name
           FROM sys.indexes i
           INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
           INNER JOIN sys.columns c ON ic.object_id = c.object_id AND c.column_id = ic.column_id
           INNER JOIN sys.tables t ON i.object_id = t.object_id
           WHERE i.is_primary_key = 1
           AND SCHEMA_NAME(t.schema_id) = %s
           AND t.name = %s;
       """
        active_cursor = cursor if cursor is not None else self.cursor_buffered
        active_cursor.execute(sql_pkname, (schema, table))
        pk_rows = active_cursor.fetchall()

        # Only a single-column primary key is usable; anything else yields None.
        if pk_rows and len(pk_rows) == 1 and len(pk_rows[0]) == 1:
            return pk_rows[0][0]
        return None

    def lock_table(self, schema, table, cursor):
        """
        Locks the specified table with appropriate lock in SQL Server.

        Opens a transaction and takes an exclusive table lock (TABLOCKX) that
        is held until the transaction ends (HOLDLOCK).

        :param schema: The schema of the table
        :param table: The table name
        :param cursor: The cursor object
        """
        self.logger.debug("Locking the table `%s`.`%s`" % (schema, table))
        # Bracket-quote the identifiers, consistent with the other SQL Server
        # statements in this module ([%s].[%s]), so names containing spaces or
        # reserved words do not break the statement.
        sql_lock = """
            BEGIN TRANSACTION;
            SELECT * FROM [%s].[%s] WITH (TABLOCKX, HOLDLOCK);
        """ % (schema, table)

        cursor.execute(sql_lock)

    def read_data_from_csv(self, schema, table, cursor_manager):
        """
            The method copy the data from existed csv file.

            Splits the pre-generated csv into fixed-size slices with the shell
            `split` utility, moves each slice into the out_dir and queues one
            CopyDataTask per slice.

            :param schema: the origin's schema
            :param table: the table name
            :param cursor_manager: the cursor manager
            :return: the log coordinates for the given table and parallel create index flag
            :rtype: list
        """
        self.logger.info("find csv file for table %s.%s, and directly use csv file for copy, omit select query"
                         " from mysql" % (schema, table))
        self.logger.debug("estimating rows in %s.%s" % (schema, table))

        cursor_buffered = cursor_manager.cursor_buffered
        # get master status
        self.logger.debug("collecting the master's coordinates for table `%s`.`%s`" % (schema, table))
        master_status = self.get_master_coordinates(cursor_buffered)

        origin_csv_file = "%s_%s.csv" % (schema, table)
        origin_csv_path = self.csv_dir + os.path.sep + origin_csv_file
        self.logger.info("statistic line number for %s file" % origin_csv_path)
        # wc prints "<lines> <bytes>"; awk normalizes the field spacing
        line_num_cmd = "wc -l -c %s | awk -F \" \" '{print$1,$2}'"
        line_num_and_bytes = os.popen(line_num_cmd % origin_csv_path).read().strip()
        line_num = int(line_num_and_bytes.split(" ")[0])
        bytes_num = int(line_num_and_bytes.split(" ")[1])
        # guard against an empty csv file (the original divided by zero here)
        avg_row_length = bytes_num // line_num if line_num else 0

        self.logger.info("statistic line number %s for %s file" % (line_num, origin_csv_path))

        self.logger.info("generate table and column metadata csv file")
        is_parallel_create_index = line_num >= DATA_NUM_FOR_PARALLEL_INDEX
        select_columns = self.generate_select_statements(schema, table, cursor_buffered)

        count_rows = {"table_rows": line_num, "copy_limit": DATA_NUM_FOR_A_SLICE_CSV, "avg_row_length": avg_row_length}
        total_slice = math.ceil(line_num / DATA_NUM_FOR_A_SLICE_CSV)
        suffix_length = len(str(total_slice))
        file_suffix = "%s_%s_slice" % (schema, table)
        self.logger.info("split csv file for table %s.%s into %s slices of %s rows"
                         % (schema, table, total_slice, DATA_NUM_FOR_A_SLICE_CSV))
        # split into numeric-suffixed chunks, then list the chunk names
        split_cmd = "split -l %s %s -d -a %s /%s/%s && ls %s | grep %s" \
                    % (DATA_NUM_FOR_A_SLICE_CSV, origin_csv_path, suffix_length, self.csv_dir, file_suffix,
                       self.csv_dir, file_suffix)
        self.logger.info(split_cmd)
        file_list = os.popen(split_cmd).readlines()
        self.logger.info("finish splitting csv files")

        for file_name in file_list:
            split_file_name = file_name.strip()
            # the numeric split suffix is the zero-based slice index
            index = int(split_file_name[-1 * suffix_length:])
            csv_file = file_suffix + str(index + 1) + ".csv"
            generated_csv_path = self.out_dir + os.path.sep + CSV_DATA_SUB_DIR + os.path.sep + csv_file
            split_csv = self.csv_dir + os.path.sep + split_file_name
            if os.system("mv %s %s" % (split_csv, generated_csv_path)) != 0:
                # %-style lazy arguments: the original used "{}" placeholders,
                # which the logging module never substitutes
                self.logger.error("mv csv file to out_dir failed for table %s.%s", schema, table)
                # skip this slice: the target csv does not exist and csv_len
                # would otherwise be undefined or stale from a previous slice
                continue
            csv_len = int(str(os.popen(line_num_cmd % generated_csv_path).read()).strip().split(" ")[0])
            if self.contains_columns and index == 0:
                # the first slice carries the header line with the column names
                with open(generated_csv_path, 'r') as f:
                    first_line = f.readline()
                    select_columns["column_list"] = first_line.strip()
                task = CopyDataTask(generated_csv_path, count_rows, table, schema, select_columns, csv_len, index,
                                      True, self.column_split)
            else:
                task = CopyDataTask(generated_csv_path, count_rows, table, schema, select_columns, csv_len, index,
                                      False, self.column_split)
            self.write_task_queue.put(task, block=True)
            self.logger.info("Table %s.%s generated %s slices of total %s slices"
                             % (schema, table, index+1, len(file_list)))
        if self.is_skip_completed_tables:
            self.handle_migration_progress(schema, table, len(file_list))
        self.logger.info("Table %s.%s generated %s slices" % (schema, table, len(file_list)))
        return [master_status, is_parallel_create_index]
    
    
    def generate_select_statements(self, schema, table, cursor=None, pk_column=None):
        """
        Generates the SQL statements for selecting data in formats suitable for CSV and INSERT for SQL Server.

        :param schema: The origin's schema
        :param table: The table name
        :param cursor: The cursor; falls back to self.cursor_buffered
        :param pk_column: Primary key column name
        :return: Dictionary with select statements for CSV and INSERT, and column list
        :rtype: dictionary
        """
        # Random token used to mark SQL NULL values inside the CSV text; it is
        # rewritten to an unquoted NULL in the final expression below.
        random = secrets.token_hex(16)
        select_columns = {}
        # Per-column T-SQL expression that renders the column value as text.
        sql_select = """
            SELECT
                CASE
                    WHEN data_type = 'image' THEN
                        'NULL'
                    WHEN data_type IN ('binary', 'varbinary') THEN 
                        'CONVERT(varchar(max), ' + column_name + ', 1)'
                    WHEN data_type = 'bit' THEN 
                        'CASE WHEN ' + column_name + ' = 1 THEN ''1'' ELSE ''0'' END'
                    WHEN data_type IN ('geometry', 'geography') THEN 
                        'CONVERT(varchar(max), ' + column_name + '.ToString())'
                    ELSE
                        'CAST(' + column_name + ' AS varchar(max))'
                END AS select_csv,
                column_name
            FROM
                INFORMATION_SCHEMA.COLUMNS
            WHERE
                TABLE_SCHEMA = %s
                AND TABLE_NAME = %s
            ORDER BY
                ORDINAL_POSITION
            ;
        """

        if cursor is None:
            self.cursor_buffered.execute(sql_select, (schema, table))
            select_data = self.cursor_buffered.fetchall()
        else:
            cursor.execute(sql_select, (schema, table))
            select_data = cursor.fetchall()

        # Escape embedded double quotes and substitute the NULL marker token.
        csv_statement = "COALESCE(REPLACE(%s, '\"', '\"\"'), '{}') ".format(random)
        select_csv = [csv_statement % statement["select_csv"] for statement in select_data]
        select_stat = [statement["select_csv"] for statement in select_data]
        column_list = ['%s' % statement["column_name"] for statement in select_data]

        # NOTE(review): CONCAT_WS requires SQL Server 2017+ — confirm minimum
        # supported server version.
        select_columns["select_csv"] = "REPLACE(CONCAT('\"', CONCAT_WS('\",\"', %s), '\"'), '\"%s\"', 'NULL')" % (','.join(select_csv), random)
        select_columns["select_stat"] = ','.join(select_stat)
        select_columns["column_list"] = ','.join(column_list)
        select_columns["column_list_select"] = select_columns["column_list"]

        # For data check, expose the PK rendering expression separately.
        if self.with_datacheck and pk_column:
            for statement in select_data:
                if statement.get("column_name") == pk_column:
                    select_columns["select_pk"] = "REPLACE(%s, '\"', '\"\"')" % (statement.get("select_csv"))
                    break

        return select_columns

    



