"""
Case Type   : DiskANN向量检索
Case Name   : diskann索引物理备份、增量备份恢复功能正常
Create At   : 2025-11-21
Owner       : sungang14
Description :
    1、打开guc参数增量备份开关
    2、创建table插入数据
    3、使用全量备份
    4、创建diskann索引
    5、进行增量备份
    6、停库，使用增量备份数据进行恢复
    7、reindex index后进行数据查询
    8、环境清理和恢复默认值
Expect      :
    1、设置guc参数enable_cbm_tracking为on
    2、表创建成功，数据插入成功
    3、全量备份成功
    4、创建索引成功
    5、增量备份成功
    6、停库，使用增量备份数据进行恢复成功
    7、reindex index后进行数据查询成功，查询结果与step4一致
    8、环境清理和恢复默认值成功
History     :
"""

import os
import re
import unittest

from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Common import Common
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro

# Module-level logger shared by every step of this case.
logger = Logger()
# Primary-node shell helper; evaluated at import time by the class-level
# skipIf decorator to check the cluster topology (1 primary + 2 standbys).
Primary_SH = CommonSH('PrimaryDbUser')


@unittest.skipIf(3 != Primary_SH.get_node_num(), '非1+2环境不执行')
class SlowSQLTestCase(unittest.TestCase):
    """DiskANN index physical backup test: full + ptrack incremental restore.

    Flow: enable cbm tracking -> create a table of random 5-dim vectors ->
    full backup -> create a diskann index -> incremental (ptrack) backup ->
    stop the cluster and restore from the incremental backup -> reindex and
    verify the query result matches the pre-restore result.
    """

    def setUp(self):
        """Check cluster health and prepare SQL statements / backup paths."""
        logger.info(f'-----{os.path.basename(__file__)} start-----')
        self.constant = Constant()
        self.comsh = CommonSH('PrimaryDbUser')
        self.user_node = Node('PrimaryDbUser')
        self.common = Common()
        # The cluster must be healthy before the case starts.
        status = self.comsh.get_db_cluster_status('status')
        self.assertTrue(status)
        self.table_name = 't_func_diskann_index0049'
        self.index_name = 'idx_func_diskann_index0049'
        self.set_param_value1 = "enable_cbm_tracking = on"
        self.show_param_value1 = "show enable_cbm_tracking;"
        param_value1 = self.comsh.execut_db_sql(self.show_param_value1)
        logger.info(param_value1)
        # The second-to-last line of the psql output holds the current value;
        # remember it so tearDown can restore the original GUC setting.
        param_value1 = param_value1.splitlines()[-2].strip()
        self.reset_param_value1 = f"enable_cbm_tracking = {param_value1}"
        self.prepare_data_00 = f"drop table if exists {self.table_name};"
        # Anonymous block: insert 1000 rows of random 5-dimensional vectors.
        self.prepare_data_01 = f'''
                                create table {self.table_name} (id bigserial primary key, val1 vector(5));
                                declare
                                 vector_dim int := 5;
                                 vector_value float[];
                                 record_num int := 1000;
                                 i int;
                                 j int;
                                begin
                                 for j in 1..record_num loop
                                 for i in 1..vector_dim loop
                                 vector_value[i] := random()*1000+1;
                                 end loop;
                                 insert into {self.table_name} (val1) values (vector_value);
                                 end loop;
                                end;
                                '''
        self.prepare_data_02 = f"drop index if exists {self.index_name};" \
                               f"alter table {self.table_name} set (parallel_workers=32);" \
                               f"create index {self.index_name} on {self.table_name} using diskann(val1 vector_l2_ops);"
        self.prepare_data_03 = f"reindex index {self.index_name};"
        # Disable seqscan so the ANN query actually exercises the diskann index.
        self.prepare_data_04 = f"set enable_seqscan=off;" \
                               f"set enable_indexscan=on;" \
                               f"select * from {self.table_name} order by val1 <-> '[1,2,3,4,5]' limit 5;"
        self.backup_dir = 'dumpcase0049'
        self.backup_path = os.path.join(os.path.dirname(macro.DB_INSTANCE_PATH), self.backup_dir)

    def test_slowsql(self):
        """Full + incremental backup/restore end-to-end with a diskann index."""
        text = '--step1: 设置guc参数增量备份开关； expect: 成功'
        logger.info(text)
        # cbm tracking is required for gs_probackup ptrack (incremental) backup.
        set_cmd1 = self.comsh.execute_gsguc('reload',
                                            self.constant.GSGUC_SUCCESS_MSG,
                                            self.set_param_value1)
        logger.info(set_cmd1)

        text = '--step2:创建table插入数据;expect:创建成功，数据插入成功--'
        logger.info(text)
        data_cmd = self.comsh.execut_db_sql(self.prepare_data_00)
        logger.info(data_cmd)
        self.assertIn(self.constant.TABLE_DROP_SUCCESS, data_cmd, '执行失败' + text)
        data_cmd = self.comsh.execut_db_sql(self.prepare_data_01)
        logger.info(data_cmd)
        self.assertIn(self.constant.TABLE_CREATE_SUCCESS, data_cmd, '执行失败' + text)
        self.assertIn('ANONYMOUS BLOCK EXECUTE', data_cmd, '执行失败' + text)

        text = '--step3:使用gs_probackup全量备份;expect:成功-----'
        logger.info(text)
        logger.info('--新建目录，进行初始化--')
        cmd = f"rm -rf {self.backup_path};mkdir -p {self.backup_path};" \
              f"chmod 700 {self.backup_path};" \
              f"ls {os.path.dirname(macro.DB_INSTANCE_PATH)} | " \
              f"grep '{self.backup_dir}';"
        res = self.common.get_sh_result(self.user_node, cmd)
        self.assertIn(self.backup_dir, res, '执行失败：' + text)

        cmd = f'source {macro.DB_ENV_PATH};' \
              f'gs_probackup init -B {self.backup_path}'
        res = self.common.get_sh_result(self.user_node, cmd)
        self.assertIn('successfully inited', res, '执行失败：' + text)

        logger.info('--在备份路径内初始化一个新的备份实例--')
        cmd = f"source {macro.DB_ENV_PATH};" \
              f"gs_probackup add-instance -B {self.backup_path} " \
              f"-D {macro.DB_INSTANCE_PATH} --instance=test_diskann"
        res = self.common.get_sh_result(self.user_node, cmd)
        self.assertIn('successfully inited', res, '执行失败：' + text)

        logger.info('--进行全量备份--')
        cmd = f"source {macro.DB_ENV_PATH};" \
              f"gs_probackup backup -B {self.backup_path} -b full " \
              f"--instance=test_diskann " \
              f"--no-validate -p {self.user_node.db_port} " \
              f"-d {self.user_node.db_name} -j 4"
        res = self.common.get_sh_result(self.user_node, cmd)
        # Assert completion (output contains 'Backup <id> completed'); the
        # previous bare re.search discarded its result and used a broken pattern.
        self.assertRegex(res, r'Backup \S+ completed', '执行失败：' + text)

        text = '--step4:创建diskann索引;expect:创建成功--'
        logger.info(text)
        data_cmd = self.comsh.execut_db_sql(self.prepare_data_02)
        logger.info(data_cmd)
        self.assertIn(self.constant.DROP_INDEX_SUCCESS_MSG, data_cmd, '执行失败' + text)
        self.assertIn(self.constant.ALTER_TABLE_MSG, data_cmd, '执行失败' + text)
        self.assertIn(self.constant.CREATE_INDEX_SUCCESS, data_cmd, '执行失败' + text)
        # Baseline query result; step7 must reproduce it after restore+reindex.
        data_cmd1 = self.comsh.execut_db_sql(self.prepare_data_04)
        logger.info(data_cmd1)

        text = '--step5: 进行增量备份; expect: 成功'
        logger.info(text)
        cmd = f"source {macro.DB_ENV_PATH};" \
              f"gs_probackup backup -B {self.backup_path} -b ptrack " \
              f"--instance=test_diskann -d {self.user_node.db_name} " \
              f"-p {self.user_node.db_port} -j 4"
        res = self.common.get_sh_result(self.user_node, cmd)
        self.assertRegex(res, r'Backup \S+ completed', '执行失败：' + text)

        text = '--step6:使用增量备份数据进行恢复;expect:成功-----'
        logger.info(text)
        show_cmd = f"source {macro.DB_ENV_PATH}; gs_probackup show -B {self.backup_path} --instance=test_diskann;"
        res = self.common.get_sh_result(self.user_node, show_cmd)
        logger.info(res)
        # Last data row of the 'show' table is the newest backup; its third
        # column is the backup ID of the ptrack backup taken in step5.
        backup_id = res.splitlines()[-2].split()[2].strip()
        logger.info(f'backup_id: {backup_id}')
        # The cluster must be stopped before restoring over the data directory.
        stop_cmd = self.comsh.stop_db_cluster()
        logger.info(stop_cmd)
        cmd = f"source {macro.DB_ENV_PATH};" \
              f"gs_probackup restore -B {self.backup_path} --instance=test_diskann " \
              f"--incremental-mode=checksum -D {macro.DB_INSTANCE_PATH} -i {backup_id};"
        res = self.common.get_sh_result(self.user_node, cmd)
        self.assertIn(f'Restore of backup {backup_id} completed', res, '执行失败：' + text)

        text = '--step7:reindex index后进行数据查询;expect：成功'
        logger.info(text)
        restart_flag = self.comsh.restart_db_cluster()
        logger.info(restart_flag)
        self.assertTrue(restart_flag, '执行失败：数据库重启失败')
        data_cmd = self.comsh.execut_db_sql(self.prepare_data_03)
        logger.info(data_cmd)
        self.assertIn("REINDEX", data_cmd, '执行失败' + text)
        data_cmd2 = self.comsh.execut_db_sql(self.prepare_data_04)
        logger.info(data_cmd2)
        # The restored + reindexed data must return the same top-5 neighbours.
        self.assertEqual(data_cmd1, data_cmd2, '执行失败：reindex index后数据查询结果不一致')

    def tearDown(self):
        """Remove the backup dir, drop the table and restore the GUC value."""
        text = '--step8:清理环境，恢复默认值;expect:成功--'
        logger.info(text)
        reset_cmd = f"rm -rf {self.backup_path}"
        res = self.common.get_sh_result(self.user_node, reset_cmd)
        logger.info(res)
        # step6 stops the cluster; make sure it is running again before the
        # cleanup SQL, even if the test body failed midway.
        status = self.comsh.get_db_cluster_status('status')
        if not status:
            restart_flag = self.comsh.restart_db_cluster()
            logger.info(restart_flag)
            self.assertTrue(restart_flag, '执行失败：数据库重启失败')
        data_cmd = self.comsh.execut_db_sql(self.prepare_data_00)
        logger.info(data_cmd)
        self.assertIn(self.constant.TABLE_DROP_SUCCESS, data_cmd, '执行失败' + text)
        set_cmd1 = self.comsh.execute_gsguc('reload',
                                            self.constant.GSGUC_SUCCESS_MSG,
                                            self.reset_param_value1)
        logger.info(set_cmd1)
        logger.info(f'-----{os.path.basename(__file__)} end-----')
