"""
Case Type   : 行存压缩
Case Name   : 普通/分区/二级分区表，主机修改压缩参数，主机循环进行增删改查操作后，使用gs_dump/gs_restore导出恢复，校验数据一致
Create At   : 2023/8/14
Owner       : li-xin12345
Description :
    1、修改配置文件postgresql.conf中增量检查点为off
    2、建普通非压缩表，并插入数据，修改为压缩表
    3、建分区压缩表，并插入数据，修改压缩参数
    4、建二级分区压缩表，并插入数据，修改为非压缩表
    5、主机三表循环执行增删改查操作
    6、查看数据，导出
    7、恢复
    8、查看数据，对比导出前
    9、清理环境
Expect      :
    1、成功
    2、成功
    3、成功
    4、成功
    5、成功
    6、成功
    7、成功
    8、一致
    9、成功
History     :
"""

import os
import unittest
import re

from yat.test import Node
from yat.test import macro

from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger


class RowCompressTest(unittest.TestCase):
    """Row-store compression round-trip test.

    Alters compression parameters on a plain table, a range-interval
    partition table and a list/list subpartition table, cycles DML on all
    three, dumps the database with gs_dump, drops and restores the tables
    with gs_restore, and verifies data consistency by comparing per-table
    checksum() aggregates taken before and after the dump/restore.
    """

    def setUp(self):
        """Prepare logger, shell/node handles, table names and parameters."""
        self.log = Logger()
        self.log.info(f'-----{os.path.basename(__file__)} start-----')
        self.pri_sh = CommonSH('PrimaryDbUser')
        self.pri_user = Node('PrimaryDbUser')
        self.constant = Constant()
        self.common = Common()
        self.tb_name = ['t_alter_rowcompress_0085_01',
                        't_alter_rowcompress_0085_02',
                        't_alter_rowcompress_0085_03']
        # Dump file lives next to the DB instance directory.
        self.dump_path = os.path.join(os.path.dirname(
            macro.DB_INSTANCE_PATH), 'f_aler_rowcompress_0085')
        self.create_compress_param = ['compresstype=1', 'compresstype = 2']
        # One ALTER parameter set per table: [0] plain -> compressed,
        # [1] partition table new params, [2] subpartition -> uncompressed.
        self.alter_compress_param = ['compresstype=2, '
                                     'compress_byte_convert=true, '
                                     'compress_diff_convert=true, '
                                     'compress_level=16, '
                                     'compress_chunk_size=2048, '
                                     'compress_prealloc_chunks=3',
                                     'compresstype=2, '
                                     'compress_byte_convert=true, '
                                     'compress_diff_convert=true, '
                                     'compress_level=31, '
                                     'compress_chunk_size=2048, '
                                     'compress_prealloc_chunks=2',
                                     'compresstype=0']
        # Remember the original GUC value so tearDown can restore it.
        self.default_param = self.common.show_param(
            'enable_incremental_checkpoint')
        self.log.info(self.default_param)

    def _collect_checksums(self):
        """Return the checksum() aggregate of each test table.

        The result is a list of strings, one per table, in self.tb_name
        order.  The checksum value sits on the second-to-last line of the
        SQL client output.
        """
        checksums = []
        for name in self.tb_name:
            sql = f'select checksum({name}::text) from {name};'
            res = self.pri_sh.execut_db_sql(sql)
            self.log.info(res)
            checksums.append(res.splitlines()[-2].strip())
            self.log.info(checksums)
        return checksums

    def _drop_all_tables(self, text):
        """Drop every test table and assert all three drops succeeded.

        :param text: step description used in assertion failure messages.
        """
        drop_res = []
        for idx, name in enumerate(self.tb_name):
            sql = f'drop table if exists {name};'
            drop_res.append(self.pri_sh.execut_db_sql(sql))
            self.log.info(drop_res)
            self.assertIn(self.constant.DROP_TABLE_SUCCESS, drop_res[idx],
                          '执行失败：' + text)
        # Belt-and-braces: confirm exactly three DROP TABLE acknowledgements.
        self.assertEqual(
            sum(1 for s in drop_res if re.search(r'DROP\s+TABLE', s)), 3,
            '执行失败：' + text)

    def test_row_compress(self):
        """Steps 1-8: build tables, alter compression, DML, dump/restore."""
        text = '-----step1:修改配置文件postgresql.conf中增量检查点为off;' \
               'expect:成功-----'
        self.log.info(text)
        # Only reload the GUC when it is not already off.
        if self.default_param != 'off':
            res = self.pri_sh.execute_gsguc(
                'reload', self.constant.GSGUC_SUCCESS_MSG,
                'enable_incremental_checkpoint=off')
            self.log.info(res)
            self.assertTrue(res)

        text = '-----step2:建普通非压缩表，并插入数据，修改为压缩表;expect:成功-----'
        self.log.info(text)
        sql = f'''drop table if exists {self.tb_name[0]} cascade;
        create table {self.tb_name[0]}(columnone integer, 
        columntwo char(50), columnthree varchar(50), columnfour integer, 
        columnfive char(50), columnsix varchar(50), columnseven char(50), 
        columneight char(50), columnnine varchar(50), columnten varchar(50), 
        columneleven char(50), columntwelve char(50), 
        columnthirteen varchar(50), columnfourteen char(50), 
        columnfifteem varchar(50));
        insert into {self.tb_name[0]} values(generate_series(0, 1000), 
        'test',substring(md5(random()::text), 1, 16), 
        2, substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16));
        '''
        sql_res = self.pri_sh.execut_db_sql(sql)
        self.log.info(sql_res)
        self.assertIn(self.constant.TABLE_DROP_SUCCESS, sql_res,
                      '执行失败：' + text)
        self.assertIn(self.constant.TABLE_CREATE_SUCCESS, sql_res,
                      '执行失败：' + text)
        self.assertIn('INSERT 0 1001', sql_res, '执行失败：' + text)

        text = '---修改为压缩表---'
        self.log.info(text)
        # \d+ output must echo the new storage parameters back.
        sql = f'alter table {self.tb_name[0]} set ' \
              f'({self.alter_compress_param[0]}); checkpoint;' \
              f'\\d+ {self.tb_name[0]}'
        sql_res = self.pri_sh.execut_db_sql(sql)
        self.log.info(sql_res)
        self.assertIn(self.constant.ALTER_TABLE_MSG, sql_res,
                      '执行失败：' + text)
        self.assertIn(self.constant.CHECKPOINT_SUCCESS_MSG, sql_res,
                      '执行失败：' + text)
        self.assertIn(f'{self.alter_compress_param[0]}', sql_res,
                      '执行失败：' + text)

        text = '-----step3:建分区压缩表，并插入数据，修改压缩参数;expect:成功-----'
        self.log.info(text)
        sql = f'''drop table if exists {self.tb_name[1]} cascade;
        create table {self.tb_name[1]}
        (prod_id number(6),
         cust_id number,
         time_id date,
         channel_id char(1),
         promo_id number(6),
         quantity_sold number(3),
         amount_sold number(10,2)
        )
        with ({self.create_compress_param[0]})
        partition by range (time_id)
        interval('1 day')
        ( partition p1 values less than ('2023-02-01 00:00:00'),
          partition p2 values less than ('2023-02-02 00:00:00')
        );
        insert into {self.tb_name[1]} values(generate_series(0, 1000), 
        lpad(floor(random() * 1000)::text, 2, '0'), 
        '2023-01-10 00:00:00', 'a', 1, 1, 1);
        insert into {self.tb_name[1]} values(generate_series(0, 1000), 
        lpad(floor(random() * 1000)::text, 2, '0'), 
        '2023-02-01 00:00:00', 'a', 2, 2, 2);
        '''
        sql_res = self.pri_sh.execut_db_sql(sql)
        self.log.info(sql_res)
        self.assertIn(self.constant.TABLE_DROP_SUCCESS, sql_res,
                      '执行失败：' + text)
        self.assertIn(self.constant.TABLE_CREATE_SUCCESS, sql_res,
                      '执行失败：' + text)
        self.assertIn('INSERT 0 1001', sql_res, '执行失败：' + text)
        self.assertEqual(sql_res.count('INSERT 0 1001'), 2, '执行失败：' + text)

        text = '---修改压缩参数---'
        self.log.info(text)
        sql = f'alter table {self.tb_name[1]} set ' \
              f'({self.alter_compress_param[1]}); checkpoint;' \
              f'\\d+ {self.tb_name[1]}'
        sql_res = self.pri_sh.execut_db_sql(sql)
        self.log.info(sql_res)
        self.assertIn(self.constant.ALTER_TABLE_MSG, sql_res,
                      '执行失败：' + text)
        self.assertIn(self.constant.CHECKPOINT_SUCCESS_MSG, sql_res,
                      '执行失败：' + text)
        self.assertIn(f'{self.alter_compress_param[1]}', sql_res,
                      '执行失败：' + text)

        text = '-----step4:建二级分区压缩表，并插入数据，修改为非压缩表;' \
               'expect:成功-----'
        self.log.info(text)
        sql = f'''drop table if exists {self.tb_name[2]} cascade;
        create table {self.tb_name[2]}
        (
            month_code varchar2 ( 30 ) not null ,
            dept_code  varchar2 ( 30 ) not null ,
            user_no    varchar2 ( 30 ) not null ,
            sales_amt  int
        )
        with ({self.create_compress_param[1]})
        partition by list (month_code) subpartition by list (dept_code)
        (
          partition p_201901 values ( '201902' )
          (
            subpartition p_201901_a values ( '1' ),
            subpartition p_201901_b values ( '2' )
          ),
          partition p_201902 values ( '201903' )
          (
            subpartition p_201902_a values ( '1' ),
            subpartition p_201902_b values ( '2' )
          )
        );
        insert into {self.tb_name[2]} values('201902', '1', '1', 1);
        insert into {self.tb_name[2]} values('201902', '2', '1', 1);
        insert into {self.tb_name[2]} values('201902', '1', '1', 1);
        insert into {self.tb_name[2]} values('201903', '2', '1', 1);
        insert into {self.tb_name[2]} values('201903', '1', '1', 1);
        insert into {self.tb_name[2]} values('201903', '2', '1', 1);
        '''
        sql_res = self.pri_sh.execut_db_sql(sql)
        self.log.info(sql_res)
        self.assertIn(self.constant.TABLE_DROP_SUCCESS, sql_res,
                      '执行失败：' + text)
        self.assertIn(self.constant.TABLE_CREATE_SUCCESS, sql_res,
                      '执行失败：' + text)
        self.assertIn('INSERT 0 1', sql_res, '执行失败：' + text)
        self.assertEqual(sql_res.count('INSERT 0 1'), 6, '执行失败：' + text)

        text = '---改为非压缩表-----'
        self.log.info(text)
        sql = f'alter table {self.tb_name[2]} set ' \
              f'({self.alter_compress_param[2]}); checkpoint;' \
              f'\\d+ {self.tb_name[2]}'
        sql_res = self.pri_sh.execut_db_sql(sql)
        self.log.info(sql_res)
        self.assertIn(self.constant.ALTER_TABLE_MSG, sql_res,
                      '执行失败：' + text)
        self.assertIn(self.constant.CHECKPOINT_SUCCESS_MSG, sql_res,
                      '执行失败：' + text)
        self.assertIn(f'{self.alter_compress_param[2]}', sql_res,
                      '执行失败：' + text)

        text = '-----step5:主机三表循环执行增删改查操作;expect:成功-----'
        self.log.info(text)
        # Full CRUD batch over all three tables; executed 20 times below.
        sql = f'''update {self.tb_name[0]} set columnone = 9999 
        where columnone <= 500;
        update {self.tb_name[1]} set prod_id = 9999 where prod_id <= 500;
        update {self.tb_name[2]} set sales_amt = 33 where dept_code = '1';
        insert into {self.tb_name[0]} values(generate_series(0, 1000), 
        'test',substring(md5(random()::text), 1, 16), 
        2, substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16), 
        substring(md5(random()::text), 1, 16));
        insert into {self.tb_name[1]} values(generate_series(0, 1000), 
        lpad(floor(random() * 1000)::text, 2, '0'), 
        '2023-01-10 00:00:00', 'a', 1, 1, 1);
        insert into {self.tb_name[1]} values(generate_series(0, 1000), 
        lpad(floor(random() * 1000)::text, 2, '0'), 
        '2023-02-01 00:00:00', 'a', 2, 2, 2);
        insert into {self.tb_name[2]} values('201902', '1', '1', 1);
        insert into {self.tb_name[2]} values('201902', '2', '1', 1);
        insert into {self.tb_name[2]} values('201902', '1', '1', 1);
        insert into {self.tb_name[2]} values('201903', '2', '1', 1);
        insert into {self.tb_name[2]} values('201903', '1', '1', 1);
        insert into {self.tb_name[2]} values('201903', '2', '1', 1);
        delete from {self.tb_name[0]} where columnone = (
        select columnone from {self.tb_name[0]} order by random() limit 1);
        delete from {self.tb_name[1]} where prod_id = (
        select prod_id from {self.tb_name[1]} order by random() limit 1);
        delete from {self.tb_name[2]} where sales_amt = (
        select sales_amt from {self.tb_name[2]} order by random() limit 1);
        select count(*) from {self.tb_name[0]};
        select count(*) from {self.tb_name[1]};
        select count(*) from {self.tb_name[2]};
        '''
        dml_res = self.pri_sh.cycle_exec_sql(sql, 20)
        self.log.info(dml_res)
        self.assertTrue(dml_res)

        text = '-----step6:查看数据，导出;expect:成功-----'
        self.log.info(text)
        checksum_before = self._collect_checksums()

        self.log.info('---导出---')
        dump_cmd = f"source {macro.DB_ENV_PATH};" \
                   f"gs_dump " \
                   f"-p {self.pri_user.db_port} " \
                   f"{self.pri_user.db_name} -F c " \
                   f"-f {self.dump_path}"
        self.log.info(dump_cmd)
        dump_msg = self.pri_user.sh(dump_cmd).result()
        self.log.info(dump_msg)
        self.assertIn(self.constant.GS_DUMP_SUCCESS_MSG, dump_msg,
                      '执行失败:' + text)

        self.log.info('---删表---')
        self._drop_all_tables(text)

        text = '-----step7:恢复;expect:成功-----'
        self.log.info(text)
        cmd = f"source {macro.DB_ENV_PATH};" \
              f"gs_restore " \
              f"-p {self.pri_user.db_port} " \
              f"-d {self.pri_user.db_name} " \
              f"{self.dump_path}"
        self.log.info(cmd)
        res = self.pri_user.sh(cmd).result()
        self.log.info(res)
        self.assertIn(self.constant.RESTORE_SUCCESS_MSG, res,
                      '执行失败:' + text)

        text = '-----step8:查看数据，对比导出前;expect:一致-----'
        self.log.info(text)
        checksum_after = self._collect_checksums()
        # Compare each table's checksum with its pre-dump value.
        # (Fixed: the original compared the whole lists on every pass
        # instead of comparing element i.)
        for i in range(3):
            self.assertEqual(checksum_before[i], checksum_after[i],
                             '执行失败:' + text)

    def tearDown(self):
        """Step 9: restore the GUC, remove the dump file, drop the tables."""
        text = '-----step9:清理环境; expect:成功-----'
        self.log.info(text)
        curr_param = self.common.show_param('enable_incremental_checkpoint')
        self.log.info(curr_param)
        # Restore the checkpoint GUC only if the test changed it.
        if curr_param != self.default_param:
            res = self.pri_sh.execute_gsguc(
                'reload', self.constant.GSGUC_SUCCESS_MSG,
                f'enable_incremental_checkpoint={self.default_param}')
            self.log.info(res)
            self.assertTrue(res)

        # Remove the dump file and report whether it is gone.
        cmd = f'rm -rf {self.dump_path};' \
              f'if [ -d {self.dump_path} ]; ' \
              f'then echo "exists"; else echo "not exists"; fi'
        rm_res = self.common.get_sh_result(self.pri_user, cmd)
        self.log.info(rm_res)

        self._drop_all_tables(text)
        self.assertEqual('not exists', rm_res, '文件删除失败')
        self.log.info(f'-----{os.path.basename(__file__)} end-----')