import datetime
import re

from modules.base import Base


def matched_keywords_counts(source, matched_keywords):
    """
    Count how many times a keyword pattern occurs in a string.
    :param source: the string to search in
    :param matched_keywords: the keyword (treated as a regex pattern)
    :return: number of non-overlapping occurrences
    """
    return len(re.findall(matched_keywords, source))


class BusinessBase(Base):
    """
    数据库业务相关的基础函数
    """

    def __init__(self, **kwargs):
        """
        Initialize connection/configuration state from keyword arguments.

        :param kwargs: expected keys: userpasswd, ipaddress, ipaddress_std1,
            ipaddress_std2, db_root_path, up_db_v, tpcc_path, ftp_vmap_path.
            Missing keys default to None.
        """
        super(BusinessBase, self).__init__()
        self.passwd = kwargs.get('userpasswd')  # database user password
        self.ipaddress = kwargs.get('ipaddress')  # primary host IP
        self.ipaddress_std1 = kwargs.get('ipaddress_std1')  # standby 1 IP
        self.ipaddress_std2 = kwargs.get('ipaddress_std2')  # standby 2 IP
        self.db_root_path = kwargs.get('db_root_path')  # DB install root dir
        # presumably a sequence of version ints (indexed as up_db_v[0]
        # in get_dolphin_v) -- TODO confirm against callers
        self.up_db_v = kwargs.get('up_db_v')
        self.tpcc_path = kwargs.get('tpcc_path')  # BenchmarkSQL directory
        self.ftp_vmap_path = kwargs.get('ftp_vmap_path')
        # user/port/db_info are populated later by init_business_db_data()
        self.user = ''
        self.port = ''
        self.db_info = dict()
        self.tpcc_user = 'tpccuser'  # DB user used by BenchmarkSQL
        self.up_tpcc_pname = 'upgrade_props.pg'  # default tpcc props file
        self.tb_disconnect_name = 't_disconnect'  # disconnect-probe table

    def init_business_db_data(self, up_user_prefix,
                              db_compatible_b_name='db_testb_pre'):
        """
        Initialize the database connection information.
        :param up_user_prefix: prefix used to locate the upgrade user's
            home directory under /home
        :param db_compatible_b_name: default name of the B-compatible DB
        :return: None
        notes: call this before any other method of this class so that
            self.db_info is populated.
        """
        self.log.info('初始化数据库连接信息')
        self.user = self.sh(f'ls /home|grep {up_user_prefix}')
        port_cmd = (f"grep -w 'port =' {self.db_root_path}/{self.user}/"
                    f"cluster/dn1/postgresql.conf|awk '{{print $3}}'")
        self.port = self.sh(port_cmd)

        self.db_info = dict(
            db_host=self.ipaddress,
            db_user=self.user,
            db_pwd=self.passwd,
            db_name='postgres',
            db_port=self.port,
            db_env=f'/home/{self.user}/gaussdb.bashrc',
            up_db_v=self.up_db_v,
            db_compatible_b_name=db_compatible_b_name,
        )
        self.log.info(self.db_info)

    def get_db_v(self, ssh):
        """
        Fetch the database version and normalize it.
        :param ssh: an established ssh connection
        :return: database version as an integer with dots removed, e.g. 310
        """
        res = self.ssh_run_sql(ssh, 'select version();', **self.db_info)
        self.log.info(res)
        found = re.search(r'openGauss (\d+\.\d+\.\d+)', res)
        if found is None:
            raise ValueError("无法从数据库版本信息中提取版本号")
        version_number = int(''.join(found.group(1).split('.')))
        self.log.info(version_number)
        return version_number

    def extract_dolphin_v(self, ssh, db_name):
        """
        Extract the dolphin extension version from the \\dx listing.
        :param ssh: an established ssh connection
        :param db_name: database name
        :return: the dolphin version string
        :raises ValueError: when no dolphin row is found in the listing
        """
        res = self.ssh_run_sql(ssh, '\\dx', db_name, **self.db_info)
        self.log.info(res)
        assert 'dolphin' in res
        row_pattern = r'\s*(\w+)\s*\|\s*(\d+\.\d+)\s*\|\s*(\w+)\s*\|\s*(.*)\s*'
        for row in re.findall(row_pattern, res):
            if row[0] != 'dolphin':
                continue
            dolphin_version = row[1]
            self.log.info(f'dolphin版本: {dolphin_version}')
            return dolphin_version
        raise ValueError("未找到dolphin版本信息")

    def check_db_compatibility_b(self, db_name='db_testb_pre',
                                 t_name='t_testb_pre'):
        """
        Create a B-compatible database and run DML inside it.
        :param db_name: name of the B-compatible database
        :param t_name: table name
        :return: db_name
        """
        self.log.info('创建兼容B库，兼容B库下dml')
        # FIX: use a context manager so the ssh connection is released even
        # when one of the asserts below fails (the original only closed it
        # on the success path).
        with self.ssh_connect(self.ipaddress, self.db_info['db_user'],
                              self.passwd) as ssh:
            run_sql = f"drop database if exists {db_name}; " \
                      f"create database {db_name} dbcompatibility 'B';"
            res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
            self.log.info(res)
            assert 'CREATE DATABASE' in res

            run_sql = f'''drop table if exists {t_name} cascade;
            create table {t_name}(id int,name text);
            '''
            res = self.ssh_run_sql(ssh, run_sql, db_name, **self.db_info)
            self.log.info(res)
            assert 'CREATE TABLE' in res

            run_sql = f'''insert into {t_name} values (generate_series(1, 1000), 'testb');
            select count(*) from {t_name};
            update {t_name} set name = 'pre' where id > 500;
            '''
            res = self.ssh_run_sql(ssh, run_sql, db_name, **self.db_info)
            self.log.info(res)
            assert 'INSERT 0 1000' in res
            assert 'UPDATE 500' in res

        return db_name

    def get_dolphin_v(self, db_name=None):
        """
        Get the dolphin version under a B-compatible database.
        :param db_name: database name; defaults to the configured
            db_compatible_b_name from self.db_info
        :return: (db version, dolphin version); dolphin version is None when
            the database version does not support dolphin
        """
        with self.ssh_connect(self.ipaddress, self.db_info.get('db_user'),
                              self.passwd) as ssh:
            # BUGFIX: the version helper defined in this module is get_db_v;
            # the original called self.get_db_version, which is not defined
            # here and raised AttributeError.
            db_version = self.get_db_v(ssh)
            if db_version < 310:
                self.log.info('当前版本不支持dolphin')
                return db_version, None
            # Versions upgraded from 300 need the B-compatible DB created
            # first before dolphin can be queried.
            if self.up_db_v[0] == 300:
                self.check_db_compatibility_b()
            self.log.info('B库下获取dolphin版本')
            if db_name is None:
                db_name = self.db_info['db_compatible_b_name']
            dolphin_version = self.extract_dolphin_v(ssh, db_name)
            self.log.info(
                f'数据库版本：{db_version}，dolphin版本：{dolphin_version}')
            return db_version, dolphin_version

    def set_tpcc_props(self, db_host=None, db_port=None, db_name=None,
                       db_pwd=None, p_name='upgrade_props.pg'):
        """
        Configure the BenchmarkSQL properties file.
        :param db_host: database host IP; defaults to self.db_info['db_host']
        :param db_port: database port; defaults to self.db_info['db_port']
        :param db_name: database name; defaults to self.db_info['db_name']
        :param db_pwd: password; defaults to self.db_info['db_pwd']
        :param p_name: properties file name
        :return: None
        """
        self.log.info('配置benchmark配置文件')
        db_host = self.db_info['db_host'] if db_host is None else db_host
        db_port = self.db_info['db_port'] if db_port is None else db_port
        db_name = self.db_info['db_name'] if db_name is None else db_name
        db_pwd = self.db_info['db_pwd'] if db_pwd is None else db_pwd

        # Rewrite lines 3-5 of the props file in place (sed 'Nc' replaces
        # line N): JDBC connection string, user, password.
        sed_cmd = f"cd {self.tpcc_path} && " \
                  f"sed -i '3cconn=jdbc:postgresql://{db_host}:{db_port}/" \
                  f"{db_name}' {p_name} && " \
                  f"sed -i '4cuser={self.tpcc_user}' {p_name} && " \
                  f"sed -i '5cpassword={db_pwd}' {p_name}"
        try:
            self.sh(sed_cmd, ispwd=True)
        except Exception as e:
            # Best-effort: failures are logged, not raised.
            self.log.info(f'配置benchmark配置文件失败，ERROR: {str(e)}')
        else:
            self.log.info('配置benchmark配置文件完成')

    def run_tpcc(self, mode='run', p_name='upgrade_props.pg'):
        """
        Prepare, run or clean up BenchmarkSQL data.
        :param mode: 'build' prepares data, 'run' executes the benchmark,
            anything else destroys the benchmark database
        :param p_name: properties file name
        :return: None
        """
        self.log.info(f'开始执行benchmark-{mode}')
        # Map the mode to its driver script; any unknown mode cleans up.
        scripts = {'run': 'runBenchmark.sh', 'build': 'runDatabaseBuild.sh'}
        s_name = scripts.get(mode, 'runDatabaseDestroy.sh')
        run_tpcc_cmd = f"cd {self.tpcc_path} && sh {s_name} {p_name};"
        try:
            self.sh(run_tpcc_cmd, ispwd=False, is_print=False)
        except Exception as e:
            self.log.info(f'执行benchmark-{mode}失败，ERROR: {str(e)}')
        else:
            self.log.info(f'执行benchmark-{mode}成功')

    def check_tpcc_data_consistency(self, p_name='upgrade_props.pg'):
        """
        Check that the data is consistent after BenchmarkSQL finishes.
        :param p_name: properties file name
        :return: None; asserts every consistency query yields '0 rows' on
            the primary and both standbys
        """
        self.log.info('检查benchmarksql执行完成后数据是否出现错乱')
        hosts = [self.ipaddress, self.ipaddress_std1, self.ipaddress_std2]
        self.log.info(hosts)
        # FIX: '{{' in an f-string renders a single '{', producing
        # awk '{print $2}' -- same style as the port grep in
        # init_business_db_data. The original quadrupled the braces, which
        # rendered '{{print $2}}'; awk tolerates the nested block but it is
        # misleading.
        cmd = f"cd {self.tpcc_path} && " \
              f"grep 'db=' {p_name} | awk -F= '{{print $2}}'"
        tpcc_db = self.sh(cmd)

        cmd = f"cd {self.tpcc_path} && " \
              f"grep 'user=' {p_name} | awk -F= '{{print $2}}'"
        tpcc_user = self.sh(cmd)
        # Six consistency checks: each EXCEPT/diff query must return an
        # empty result ('0 rows') when warehouse/district/order data agree.
        run_sql = """(Select w_id, w_ytd from bmsql_warehouse) 
        except(select d_w_id, sum(d_ytd) from bmsql_district group by d_w_id);
        (Select d_w_id, d_id, D_NEXT_O_ID - 1 from bmsql_district) except 
        (select o_w_id, o_d_id, max(o_id) 
        from bmsql_oorder group by o_w_id, o_d_id);
        (Select d_w_id, d_id, D_NEXT_O_ID - 1 from bmsql_district) except 
        (select no_w_id, no_d_id, max(no_o_id) from bmsql_new_order 
        group by no_w_id, no_d_id);
        select * from (select (count(no_o_id)-(max(no_o_id)-min(no_o_id)+1)) as 
        diff from bmsql_new_order group by no_w_id, no_d_id) where diff != 0;
        (select o_w_id, o_d_id, sum(o_ol_cnt) from bmsql_oorder  
        group by o_w_id, o_d_id) 
        except (select ol_w_id, ol_d_id, count(ol_o_id) from bmsql_order_line 
        group by ol_w_id, ol_d_id);
        (select d_w_id, sum(d_ytd) from bmsql_district group by d_w_id) 
        except(Select w_id, w_ytd from bmsql_warehouse);
        """
        for ip in hosts:
            with self.ssh_connect(ip, self.db_info.get('db_user'),
                                  self.passwd) as ssh:
                res = self.ssh_run_sql(ssh, run_sql, db=tpcc_db,
                                       user=tpcc_user,
                                       **self.db_info)
                self.log.info(res)
                actual_counts = matched_keywords_counts(res, '0 rows')
                # FIX: the context manager already closes the connection;
                # the original additionally called ssh_close here, closing
                # it twice.
                assert '0 rows' in res
                assert actual_counts == 6

    def create_row_compress(self, tb_name='t_row_compress_pre'):
        """
        Preset row-store compressed tables (combinations of plain,
        partitioned and compressed tables).
        :param tb_name: table name prefix; suffixes _01.._06 are appended
        :return: None
        """
        # NOTE(review): the connection is closed only on the success path;
        # an assert failure below leaks it -- consider a context manager.
        ssh = self.ssh_connect(self.ipaddress, self.db_info.get('db_user'),
                               self.passwd)

        self.log.info('01-astore普通压缩表，建表并插入数据')
        run_sql = f'''drop table if exists {tb_name}_01 cascade;
        create table {tb_name}_01(v1 int, v2 varchar(50)) with 
        (compresstype = 2, compress_chunk_size = 512,  
        compress_prealloc_chunks = 7, compress_byte_convert = true, 
        compress_diff_convert = true, compress_level=30);
        insert into {tb_name}_01 values(generate_series(0, 1000),'test');
        '''
        res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
        self.log.info(res)
        assert 'DROP TABLE' in res
        assert 'CREATE TABLE' in res
        assert 'INSERT 0 1001' in res

        self.log.info('02-astore 二级分区压缩表range_list，建表并插入数据')
        run_sql = f'''drop table if exists {tb_name}_02 cascade;
        create table {tb_name}_02(month_code varchar2 ( 30 ) not null ,
        dept_code  varchar2 ( 30 ) not null ,
        user_no    varchar2 ( 30 ) not null ,
        sales_amt  int) 
        with (compresstype=2, compress_level=20, compress_chunk_size=4096, 
        compress_prealloc_chunks=1, compress_byte_convert=true, 
        compress_diff_convert=true)
        partition by range (month_code) subpartition by list (dept_code)
        (partition p_202401 values less than( '202403' )
        (subpartition p_202401_a values ('1'),
        subpartition p_202401_b values ('2')),
        partition p_202402 values less than( '202410' )
        (subpartition p_202402_a values ('1'),
        subpartition p_202402_b values ('2')));
        insert into {tb_name}_02 partition (p_202402) 
        values('202406', '1', '1', 1);
        '''
        res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
        self.log.info(res)
        assert 'DROP TABLE' in res
        assert 'CREATE TABLE' in res
        assert 'INSERT 0 1' in res

        self.log.info('03-astore范围分区压缩表，建表并插入数据')
        run_sql = f'''drop table if exists {tb_name}_03 cascade;
        create table {tb_name}_03(order_no integer not null,
        goods_name char(20) not null, 
        sales_date date not null) 
        with (compresstype=2, compress_level=12, compress_chunk_size=512, 
        compress_prealloc_chunks=0, compress_byte_convert=false, 
        compress_diff_convert=false)
        partition by range(sales_date)
        (
        partition season1 values less than('2024-03-01 00:00:00'),
        partition season2 values less than('2024-06-01 00:00:00'),
        partition season3 values less than('2024-09-01 00:00:00'),
        partition season4 values less than('2024-11-01 00:00:00')
        );
        insert into {tb_name}_03 values(1, 'jacket', '2024-01-10 00:00:00');
        '''
        res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
        self.log.info(res)
        assert 'DROP TABLE' in res
        assert 'CREATE TABLE' in res
        assert 'INSERT 0 1' in res

        self.log.info('04-astore间隔分区压缩表，建表并插入数据')
        run_sql = f'''drop table if exists {tb_name}_04 cascade;
        create table {tb_name}_04 (order_no integer not null, 
        goods_name char(20) not null, 
        sales_date date not null
        ) with (compresstype=2, compress_level=15, compress_chunk_size=512, 
        compress_prealloc_chunks=6, compress_byte_convert=true, 
        compress_diff_convert=true)
        partition by range(sales_date) interval ('1 month') (
        partition start values less than('2024-01-01 00:00:00'), 
        partition later values less than('2024-01-10 00:00:00'));
        insert into {tb_name}_04 values(1, 'jacket', '2024-01-8 00:00:00');
        '''
        res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
        self.log.info(res)
        assert 'DROP TABLE' in res
        assert 'CREATE TABLE' in res
        assert 'INSERT 0 1' in res

        self.log.info('05-astore List分区压缩表，建表并插入数据')
        run_sql = f'''drop table if exists {tb_name}_05 cascade;
        create table {tb_name}_05(number integer, name char(20), 
        class char(20), grade integer) with (compresstype=2, compress_level=10, 
        compress_chunk_size=1024, compress_prealloc_chunks=7, 
        compress_byte_convert=false, compress_diff_convert=false)  
        partition by list(class)  
        (partition class_01 values ('24.01'),  
        partition class_02 values ('24.02'),
        partition class_03 values ('24.03'),
        partition class_04 values ('24.04'));
        insert into {tb_name}_05 values('240101','alan','24.01',92); 
        '''
        res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
        self.log.info(res)
        assert 'DROP TABLE' in res
        assert 'CREATE TABLE' in res
        assert 'INSERT 0 1' in res

        self.log.info('06-astore HASH分区压缩表，建表并插入数据')
        run_sql = f'''drop table if exists {tb_name}_06 cascade;
        create table {tb_name}_06 (col1 int, col2 int)
        with (compresstype=2, compress_level=6, 
        compress_chunk_size=1024, compress_prealloc_chunks=5, 
        compress_byte_convert=true, compress_diff_convert=true)
        partition by hash(col1)
        (
        partition p1,
        partition p2
        );
        insert into {tb_name}_06 values(1, 1);
        '''
        res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
        self.log.info(res)
        assert 'DROP TABLE' in res
        assert 'CREATE TABLE' in res
        assert 'INSERT 0 1' in res

        self.ssh_close(ssh)

    def row_compress_dml(self, tb_name='t_row_compress_pre'):
        """
        Simple DML against the row-store compressed tables created by
        create_row_compress.
        :param tb_name: table name prefix; must match create_row_compress
        :return: None
        """
        self.log.info('01-astore普通压缩表dml')
        # NOTE(review): like create_row_compress, the connection leaks if an
        # assert fails before ssh_close at the end.
        ssh = self.ssh_connect(self.ipaddress, self.db_info.get('db_user'),
                               self.passwd)
        run_sql = f'''update {tb_name}_01 set v2 = 'updated' where v1 <= 500;  
        select count(*) from {tb_name}_01;
        delete from {tb_name}_01;
        insert into {tb_name}_01 values(generate_series(0, 1000),'test');
        '''
        res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
        self.log.info(res)
        assert 'UPDATE 501' in res
        assert '1 row' in res
        assert 'DELETE 1001' in res
        assert 'INSERT 0 1001' in res

        self.log.info('02-astore 二级分区压缩表range_list dml')
        run_sql = f'''insert into {tb_name}_02 subpartition (p_202401_a) 
        values('202402', '1', '1', 1);
        insert into {tb_name}_02 partition for ('202402') 
        values('202402', '1', '1', 1);
        insert into {tb_name}_02 subpartition for ('202402','1') 
        values('202402', '1', '1', 1);
        select count(*) from {tb_name}_02 partition (p_202401);
        select count(*) from {tb_name}_02 subpartition (p_202401_a);
        update {tb_name}_02 partition (p_202401) set user_no = '2';
        delete from {tb_name}_02 partition (p_202401);
        '''
        res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
        self.log.info(res)
        # 3 inserts and 2 count-selects are expected in the output.
        insert_counts = matched_keywords_counts(res, 'INSERT 0 1')
        select_counts = matched_keywords_counts(res, '1 row')
        assert 'INSERT 0 1' in res
        assert '1 row' in res
        assert 'UPDATE 3' in res
        assert 'DELETE 3' in res
        assert insert_counts == 3
        assert select_counts == 2

        self.log.info('03-astore范围分区压缩表dml')
        run_sql = f'''insert into {tb_name}_03 values(2, 'shirt', 
        '2024-5-28 12:00:00');
        select count(*) from {tb_name}_03 partition (season1);
        delete from {tb_name}_03 partition (season1);
        alter table {tb_name}_03 drop partition season1;
        alter table {tb_name}_03 add partition season1 values less than 
        ('2024-11-28 00:00:00');
        insert into {tb_name}_03 values(2, 'shirt', '2024-11-5 12:00:00');
        '''
        res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
        self.log.info(res)
        insert_counts = matched_keywords_counts(res, 'INSERT 0 1')
        alter_counts = matched_keywords_counts(res, 'ALTER TABLE')
        assert 'INSERT 0 1' in res
        assert '1 row' in res
        assert 'DELETE 1' in res
        assert 'ALTER TABLE' in res
        assert alter_counts == 2
        assert insert_counts == 2

        self.log.info('04-astore间隔分区压缩表dml')
        run_sql = f'''insert into {tb_name}_04 values(2, 'hat', 
        '2024-04-06 00:00:00');
        insert into {tb_name}_04 values(3, 'shirt', '2024-11-17 00:00:00');
        insert into {tb_name}_04 values(4, 'coat', '2020-10-21 00:00:00');
        select count(*) from {tb_name}_04;
        select count(*) from {tb_name}_04 partition (start);
        select count(*) from {tb_name}_04 partition (later);
        delete from {tb_name}_04 partition (sys_p2);
        alter table {tb_name}_04 drop partition sys_p2;
        insert into {tb_name}_04 values(3, 'shirt', '2021-11-17 00:00:00');
        '''
        res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
        self.log.info(res)
        insert_counts = matched_keywords_counts(res, 'INSERT 0 1')
        select_counts = matched_keywords_counts(res, '1 row')
        assert 'INSERT 0 1' in res
        assert '1 row' in res
        assert 'DELETE 1' in res
        assert 'ALTER TABLE' in res
        assert insert_counts == 4
        assert select_counts == 3

        self.log.info('05-astore List分区压缩表dml')
        run_sql = f'''insert into {tb_name}_05 values('240102','ben',
        '24.01',62);  
        insert into {tb_name}_05 values('240103','brain','24.01',26);
        insert into {tb_name}_05 values('240204','carl','24.02',77);  
        insert into {tb_name}_05 values('240205','david','24.02',47);  
        insert into {tb_name}_05 values('240206','eric','24.02',97);  
        select count(*) from {tb_name}_05;
        select count(*) from {tb_name}_05 partition (class_02);
        delete from {tb_name}_05 partition (class_02);
        alter table {tb_name}_05 drop partition class_02;
        alter table {tb_name}_05 add partition class_02 values ('24.02');
        '''
        res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
        self.log.info(res)
        insert_counts = matched_keywords_counts(res, 'INSERT 0 1')
        select_counts = matched_keywords_counts(res, '1 row')
        alter_counts = matched_keywords_counts(res, 'ALTER TABLE')
        assert 'INSERT 0 1' in res
        assert '1 row' in res
        assert 'DELETE 3' in res
        assert 'ALTER TABLE' in res
        assert insert_counts == 5
        assert select_counts == 2
        assert alter_counts == 2

        self.log.info('06-astore HASH分区压缩表dml')
        run_sql = f'''insert into {tb_name}_06 values(2, 2);
        insert into {tb_name}_06 values(3, 3);
        insert into {tb_name}_06 values(4, 4);
        select count(*) from {tb_name}_06;
        select count(*) from {tb_name}_06 partition (p1);
        '''
        res = self.ssh_run_sql(ssh, run_sql, **self.db_info)
        self.log.info(res)
        insert_counts = matched_keywords_counts(res, 'INSERT 0 1')
        select_counts = matched_keywords_counts(res, '1 row')
        assert 'INSERT 0 1' in res
        assert '1 row' in res
        assert insert_counts == 3
        assert select_counts == 2
        self.ssh_close(ssh)

    def index(self, tbs_name='tbs_index_pre', tb_name='t_index_pre',
              index_name='index_pre'):
        """
        Exercise index create/alter/drop plus global and local indexes on
        partitioned tables.
        :param tbs_name: tablespace name prefix
        :param tb_name: table name prefix
        :param index_name: index name prefix
        :return: None
        """
        with self.ssh_connect(self.ipaddress, self.db_info.get('db_user'),
                              self.passwd) as ssh:
            self.log.info('01-创建唯一索引、b-tree索引、hash索引、表达式索引、'
                          '部分索引、组合索引')
            sql = f'''drop table if exists {tb_name}_01 cascade;
            create table {tb_name}_01(id integer not null, 
            name char(16) not null, hobbies char(10));
            create unique index {index_name}_01 on {tb_name}_01(id);
            create index {index_name}_02 on {tb_name}_01 using btree(id);
            create index {index_name}_03 on {tb_name}_01 using hash(id);
            create index {index_name}_04 on {tb_name}_01(substr(hobbies,1 ,4));
            create unique index {index_name}_05 on {tb_name}_01(id) 
            where id>10;
            create index {index_name}_06 on {tb_name}_01(id, name);
            '''
            res = self.ssh_run_sql(ssh, sql, **self.db_info)
            self.log.info(res)
            # six CREATE INDEX statements above
            create_index_counts = matched_keywords_counts(res, 'CREATE INDEX')
            assert 'DROP TABLE' in res
            assert 'CREATE TABLE' in res
            assert create_index_counts == 6

            self.log.info('02-重命名索引、设置索引不可用、重建索引、删除索引')
            sql = f'''alter index {index_name}_01 rename to {index_name}_07;
            alter index {index_name}_02 unusable;
            alter index {index_name}_02 rebuild;
            drop index if exists {index_name}_02;
            drop index if exists {index_name}_03;
            drop index if exists {index_name}_04;
            drop index if exists {index_name}_05;
            drop index if exists {index_name}_06;
            drop index if exists {index_name}_07;
            drop table if exists {tb_name}_01 cascade;
            '''
            res = self.ssh_run_sql(ssh, sql, **self.db_info)
            self.log.info(res)
            alter_index_counts = matched_keywords_counts(res, 'ALTER INDEX')
            drop_index_counts = matched_keywords_counts(res, 'DROP INDEX')
            assert 'DROP TABLE' in res
            # 'rebuild' reports REINDEX rather than ALTER INDEX
            assert 'REINDEX' in res
            assert alter_index_counts == 2
            assert drop_index_counts == 6

            self.log.info('03-列存表gin索引')
            sql = f'''
            drop table if exists {tb_name}_02 cascade;
            create table {tb_name}_02(a int, b text) with 
            (orientation = column);
            create index {index_name}_08 on {tb_name}_02 
            using gin(to_tsvector('ngram', b));
            alter index {index_name}_08 rename to {index_name}_09;
            drop index if exists {index_name}_08;
            drop index if exists {index_name}_09;
            drop table if exists {tb_name}_02 cascade;
            '''
            res = self.ssh_run_sql(ssh, sql, **self.db_info)
            self.log.info(res)
            drop_table_counts = matched_keywords_counts(res, 'DROP TABLE')
            drop_index_counts = matched_keywords_counts(res, 'DROP INDEX')
            assert 'CREATE TABLE' in res
            assert 'ALTER INDEX' in res
            assert drop_table_counts == 2
            assert drop_index_counts == 2

            self.log.info('04-分区表全局分区索引、分区索引')
            sql = f'''
            drop tablespace if exists {tbs_name}_01;
            drop tablespace if exists {tbs_name}_02;
            drop tablespace if exists {tbs_name}_03;
            drop tablespace if exists {tbs_name}_04;
            create tablespace {tbs_name}_01 relative 
            location 'tablespace1/tablespace_1';
            create tablespace {tbs_name}_02 relative 
            location 'tablespace2/tablespace_2';
            create tablespace {tbs_name}_03 relative 
            location 'tablespace3/tablespace_3';
            create tablespace {tbs_name}_04 relative 
            location 'tablespace4/tablespace_4';
            drop table if exists {tb_name}_03 cascade;
            create table {tb_name}_03(id integer not null, 
            address char(16) not null, 
            street char(10))tablespace {tbs_name}_01 
            partition by range(id)(partition p1 values less than (3000), 
            partition p2 values less than (5000) tablespace {tbs_name}_01, 
            partition p3 values less than (maxvalue) 
            tablespace {tbs_name}_02)enable row movement;
            --创建分区表索引，不指定索引分区的名称
            create index {index_name}_10 on {tb_name}_03(id) local; 
            --创建分区表索引，并指定索引分区的名称
            create index {index_name}_11 on {tb_name}_03(id) 
            local(partition id_index1, 
            partition id_index2 tablespace {tbs_name}_03, 
            partition id_index3 tablespace {tbs_name}_04) 
            tablespace {tbs_name}_02;
            --创建global分区索引
            create index {index_name}_12 on {tb_name}_03(address) global;
            --不指定关键字，默认创建global分区索引
            create index {index_name}_13 on {tb_name}_03(address);
            --修改分区表索引的表空间
            alter index {index_name}_11 move partition id_index2 
            tablespace {tbs_name}_01;
            alter index {index_name}_11 move partition id_index3 
            tablespace {tbs_name}_02;
            --重命名分区表索引。
            alter index {index_name}_11 rename partition id_index1 to id_index4;
            drop index {index_name}_10;
            drop index {index_name}_11;
            drop table if exists {tb_name}_03 cascade;
            drop tablespace if exists {tbs_name}_01;
            drop tablespace if exists {tbs_name}_02;
            drop tablespace if exists {tbs_name}_03;
            drop tablespace if exists {tbs_name}_04;
            '''
            res = self.ssh_run_sql(ssh, sql, **self.db_info)
            self.log.info(res)
            # 4 tablespaces dropped both before and after => 8 DROPs total
            drop_tps_counts = matched_keywords_counts(res, 'DROP TABLESPACE')
            create_tps_counts = matched_keywords_counts(res,
                                                        'CREATE TABLESPACE')
            create_index_counts = matched_keywords_counts(res, 'CREATE INDEX')
            drop_index_counts = matched_keywords_counts(res, 'DROP INDEX')
            alter_index_counts = matched_keywords_counts(res, 'ALTER INDEX')
            assert 'DROP TABLE' in res
            assert 'CREATE TABLE' in res
            assert drop_tps_counts == 8
            assert create_tps_counts == 4
            assert create_index_counts == 4
            assert drop_index_counts == 2
            assert alter_index_counts == 3

    def check_table_consistency(self):
        """
        Verify that table data is identical on primary and both standbys by
        comparing per-table row counts and checksums.
        :return: True when every host reports identical counts and
            checksums table-by-table, otherwise False
        """
        self.log.info('校验各表主备数据一致')
        count_list = []
        checksum_list = []
        hosts = [self.ipaddress, self.ipaddress_std1, self.ipaddress_std2]
        self.log.info(hosts)

        with self.ssh_connect(hosts[0], self.db_info.get('db_user'),
                              self.passwd) as ssh:
            get_tbs = "select tablename from pg_tables where schemaname='public';"
            self.log.info(get_tbs)
            res = self.ssh_run_sql(ssh, get_tbs, **self.db_info)
            self.log.info(res)
            # Drop the two header lines and the trailing '(N rows)' line.
            tables = res.splitlines()[2:-1]
            self.log.info(tables)

        for ip in hosts:
            with self.ssh_connect(ip, self.db_info.get('db_user'),
                                  self.passwd) as ssh:
                count_cmd = [f"select count(*) from {i};" for i in tables]
                self.log.info(count_cmd)
                checksum_cmd = [
                    f"select checksum({i}::text) from {i};" for i in tables]
                self.log.info(checksum_cmd)
                # The value sits on the second-to-last line of gsql output.
                count_res = [self.ssh_run_sql(ssh, cmd, **self.db_info)
                             .splitlines()[-2].strip() for cmd in count_cmd]
                self.log.info(count_res)
                checksum_res = [self.ssh_run_sql(ssh, cmd, **self.db_info)
                                .splitlines()[-2].strip()
                                for cmd in checksum_cmd]
                self.log.info(checksum_res)

                count_list.append(count_res)
                checksum_list.append(checksum_res)
                self.log.info(count_list)
                self.log.info(checksum_list)

        # BUGFIX: compare the per-host result lists positionally. The
        # original compared set(x) == set(count_list[0]), which ignores
        # order and duplicates, so hosts whose per-table counts were
        # swapped (or where duplicated values masked a mismatch) still
        # passed as consistent.
        return (all(x == count_list[0] for x in count_list)
                and all(x == checksum_list[0] for x in checksum_list))

    def logical_replication(self, slot_name='slot_pre',
                            tb_name='t_logical_replic_pre', create=True):
        """
        Create a logical replication slot, run DML on a table and inspect
        the logical decoding output.
        :param slot_name: logical replication slot name
        :param tb_name: table name
        :param create: True (default) creates the slot, the table and runs
            insert/update/select/delete; False only peeks at the decoded
            changes of an existing slot
        :return: the logical decoding output (WARNING lines stripped)
        """
        self.log.info('逻辑复制业务')
        with self.ssh_connect(self.ipaddress, self.db_info.get('db_user'),
                              self.passwd) as ssh:
            if create:
                sql = f"select * from pg_create_logical_replication_slot(" \
                      f"'{slot_name}', 'mppdb_decoding');"
                res = self.ssh_run_sql(ssh, sql, **self.db_info)
                self.log.info(res)
                assert 'slotname' in res
                assert '1 row' in res

                sql = f"drop table if exists {tb_name}; " \
                      f"create table {tb_name}(v1 int, v2 varchar(20));"
                res = self.ssh_run_sql(ssh, sql, **self.db_info)
                self.log.info(res)
                assert 'DROP TABLE' in res
                assert 'CREATE TABLE' in res

                sql = f"insert into {tb_name} values(1, 'test');"
                res = self.ssh_run_sql(ssh, sql, **self.db_info)
                self.log.info(res)
                assert 'INSERT 0 1' in res

                sql = f"update {tb_name} set v2 = 'text';"
                res = self.ssh_run_sql(ssh, sql, **self.db_info)
                self.log.info(res)
                assert 'UPDATE 1' in res

                sql = f"select count(*) from {tb_name};"
                res = self.ssh_run_sql(ssh, sql, **self.db_info)
                self.log.info(res)
                assert '1 row' in res

                sql = f"delete from {tb_name};"
                res = self.ssh_run_sql(ssh, sql, **self.db_info)
                self.log.info(res)
                assert 'DELETE 1' in res

            # Peek (non-destructively) at up to 11 decoded changes.
            slot_sql = f"select * from pg_logical_slot_peek_changes(" \
                       f"'{slot_name}', NULL, 11);"
            sql_res = self.ssh_run_sql(ssh, slot_sql, **self.db_info)
            self.log.info(sql_res)
            # Strip any WARNING lines from the output before checking it.
            res = re.sub(r'^.*WARNING.*$\n?', '', sql_res,
                         flags=re.MULTILINE).strip()
            assert '11 rows' in res
            return res

    def get_disconnect_time(self):
        """
        Measure the brief-disconnect (connection flash) window during a grey
        upgrade: insert rows in a loop and time the gap between the first
        failed connection and the first successful insert afterwards.
        :return: disconnect duration in whole seconds, or None when no
            complete disconnect/reconnect cycle is observed within the
            monitoring window
        """
        self.log.info('获取灰度升级过程中闪断时间')
        time1 = datetime.datetime.now()
        self.log.info(f'开始监控时间：{time1}')
        found_disconnect = False
        start_time = None  # set when the first failed connection is seen
        disconnect_keywords = 'failed to connect'
        success_keywords = 'INSERT 0 1000'
        with self.ssh_connect(self.ipaddress, self.db_info['db_user'],
                              self.passwd) as ssh:
            run_sql = f'drop table if exists {self.tb_disconnect_name};' \
                      f'create table {self.tb_disconnect_name}(v1 int);'
            self.ssh_run_sql(ssh, run_sql, **self.db_info)
            # Poll via gsql for up to 1000 seconds.
            while (datetime.datetime.now() - time1).seconds < 1000:
                sql = f'insert into {self.tb_disconnect_name} values ' \
                      f'(generate_series(1, 1000));'
                cmd = f"source {self.db_info['db_env']}; " \
                      f"gsql -d {self.db_info['db_name']} " \
                      f"-p {self.db_info['db_port']} -c '{sql}'"
                res = self.ssh_run(ssh, cmd, True, False)
                if disconnect_keywords in res and not found_disconnect:
                    # First failure marks the start of the disconnect window.
                    start_time = datetime.datetime.now()
                    self.log.info(f'断连开始时间：{start_time}')
                    found_disconnect = True
                elif success_keywords in res and found_disconnect:
                    # First success after a failure marks the end.
                    end_time = datetime.datetime.now()
                    self.log.info(f'断连再重连成功的时间：{end_time}')
                    time_diff = (end_time - start_time).seconds
                    self.log.info(f'灰度升级过程中闪断时间：{time_diff}')
                    time2 = datetime.datetime.now()
                    self.log.info(f'结束监控时间：{time2}')
                    return time_diff
            # FIX: make the timeout outcome explicit instead of silently
            # falling off the end of the loop (same return value: None).
            self.log.info('监控超时，未检测到完整的断连/重连周期')
            return None