"""
Case Type   : DiskANN向量检索
Case Name   : 并行重建索引过程注入故障，故障类型为备机kill -9
Create At   : 2025/11/22
Owner       : sungang14
Description :
    1.创建表、插入数据，设置索引构建并行度
    2.创建索引
    3.再次插入数据
    4.重建索引
    5.步骤4执行过程中，kill备机数据库进程
    6.重启数据库
    7.再次重建索引
    8.主备均使用索引查询
    9.清理环境
Expect      :
    1.创建表、插入数据成功，设置索引构建并行度为32
    2.索引创建成功
    3.插入数据成功
    4.下发成功
    5.kill -9执行成功
    6.数据库状态正常
    7.索引重建成功
    8.主备查询数据一致
    9.环境清理成功
History     :
"""

import os
import time
import unittest

from yat.test import Node
from yat.test import macro

from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.ComThread import ComThread
from testcase.utils.Logger import Logger

# Module-level helpers shared by every test in this file.
logger = Logger()
# Shell/SQL executor bound to the primary database node ('PrimaryDbUser').
primary_sh = CommonSH('PrimaryDbUser')


class Diskann_Index(unittest.TestCase):
    """DiskANN index rebuild with fault injection (kill -9 on the standby).

    Scenario: create a vector table with parallel index-build workers,
    build a diskann index, start a REINDEX, kill the standby's gaussdb
    processes while it runs, restart the standby, reindex again, and
    finally verify that primary and standby return identical query results.
    """

    def setUp(self):
        """Prepare node/shell handles and the SQL statements used below."""
        logger.info(f'--------{os.path.basename(__file__)} start--------')
        self.constant = Constant()
        self.com = Common()
        self.pri_node = Node('PrimaryDbUser')
        self.sta1_node = Node('Standby1DbUser')
        self.sta1_sh = CommonSH('Standby1DbUser')
        # No placeholders here, so plain string literals (not f-strings).
        self.table_name = 't_func_diskann_index0069'
        self.index_name = 'idx_func_diskann_index0069'
        self.prepare_data_00 = f"drop table if exists {self.table_name};"
        # Create the table, then insert 1000 random 5-dim vectors via an
        # anonymous PL block.
        self.prepare_data_01 = f'''
                                create table {self.table_name} (id bigserial primary key, val1 vector(5));
                                declare
                                 vector_dim int := 5;
                                 vector_value float[];
                                 record_num int := 1000;
                                 i int;
                                 j int;
                                begin
                                 for j in 1..record_num loop
                                 for i in 1..vector_dim loop
                                 vector_value[i] := random()*1000+1;
                                 end loop;
                                 insert into {self.table_name} (val1) values (vector_value);
                                 end loop;
                                end;
                                '''
        # Index-build parallelism of 32, per the case description.
        self.prepare_data_01_1 = f"alter table {self.table_name} set (parallel_workers=32);"
        self.prepare_data_02 = f"drop index if exists {self.index_name};"
        self.prepare_data_03 = f"create index {self.index_name} on {self.table_name} using diskann(val1 vector_l2_ops);"
        self.prepare_data_04 = f"insert into {self.table_name}(val1) values('[1,2,3,4,5]');"
        self.prepare_data_05 = f"reindex index {self.index_name};"
        # Force an index scan for the nearest-neighbour query used to
        # compare primary and standby results.
        self.prepare_data_06 = f"set enable_seqscan=off;" \
                               f"set enable_indexscan=on;" \
                               f"select * from {self.table_name} order by val1 <-> '[1,2,3,4,5]' limit 5;"

    def test_index(self):
        """Run the end-to-end fault-injection scenario (steps 1-8)."""
        text = '--------step1: 创建表、插入数据,设置索引构建并行度; expect: 创建表、插入数据成功,设置索引构建并行度为32--------'
        logger.info(text)
        data_cmd = primary_sh.execut_db_sql(self.prepare_data_00)
        logger.info(data_cmd)
        self.assertIn(self.constant.TABLE_DROP_SUCCESS, data_cmd, '执行失败' + text)
        data_cmd = primary_sh.execut_db_sql(self.prepare_data_01)
        logger.info(data_cmd)
        self.assertIn(self.constant.TABLE_CREATE_SUCCESS, data_cmd, '执行失败' + text)
        self.assertIn('ANONYMOUS BLOCK EXECUTE', data_cmd, '执行失败' + text)
        data_cmd = primary_sh.execut_db_sql(self.prepare_data_01_1)
        logger.info(data_cmd)
        self.assertIn(self.constant.ALTER_TABLE_MSG, data_cmd, '执行失败' + text)

        text = '--------step2:创建索引; expect:开始执行--------'
        logger.info(text)
        data_cmd = primary_sh.execut_db_sql(self.prepare_data_02)
        logger.info(data_cmd)
        self.assertIn(self.constant.DROP_INDEX_SUCCESS_MSG, data_cmd, '执行失败' + text)
        res = primary_sh.execut_db_sql(self.prepare_data_03)
        logger.info(res)
        self.assertIn(self.constant.CREATE_INDEX_SUCCESS, res, '执行失败' + text)

        text = '--------step3: 再次插入数据; expect:成功--------'
        logger.info(text)
        res = primary_sh.execut_db_sql(self.prepare_data_04)
        logger.info(res)
        self.assertIn('INSERT 0 1', res, '执行失败' + text)

        text = '--------step4: 重建索引; expect:成功--------'
        logger.info(text)
        # Run the REINDEX in a background thread so the fault can be
        # injected while it is still executing.
        ix_thread = ComThread(primary_sh.execut_db_sql,
                              args=(self.prepare_data_05,))
        # Thread.setDaemon() is deprecated since Python 3.10; set the
        # daemon attribute directly instead.
        ix_thread.daemon = True
        ix_thread.start()
        # Give the rebuild a 2s head start before injecting the fault.
        ix_thread.join(2)

        text = '--------step5: 步骤4执行过程中,kill备机数据库进程; expect:成功--------'
        logger.info(text)
        # NOTE(review): the doubled braces look like leftover f-string
        # escaping, but '{{print $2}}' is still a valid awk program (a
        # nested block), so the command behaves the same; kept as-is.
        cmd = r"ps ux|grep 'gaussdb '|grep -v grep|awk '{{print $2}}'|xargs kill -9"
        logger.info(cmd)
        res = self.sta1_node.sh(cmd).result()
        logger.info(res)

        text = '--------step6: 重启数据库; expect:成功--------'
        logger.info(text)
        result = self.sta1_sh.start_db_instance(mode='standby')
        self.assertIn(self.constant.REBUILD_SUCCESS_MSG, result, '执行失败' + text)

        text = '--------step7: 再次创建索引; expect:成功--------'
        logger.info(text)
        res = primary_sh.execut_db_sql(self.prepare_data_05)
        logger.info(res)
        self.assertIn(self.constant.REINDEX_SUCCESS_MSG, res, '执行失败' + text)

        text = '--------step8: 主备均使用索引查询; expect:结果相同--------'
        logger.info(text)
        # Short pause so the standby can catch up before comparing results.
        time.sleep(1)
        res_list = []
        node_num = self.com.get_node_num(self.pri_node)
        res = primary_sh.execut_db_sql(self.prepare_data_06)
        logger.info(f"Primary res: {res}")
        res_list.append(res)
        for i in range(1, node_num):
            res = CommonSH(f'Standby{i}DbUser').execut_db_sql(self.prepare_data_06)
            logger.info(f"Standby {i} res: {res}")
            res_list.append(res)
        # All nodes must return byte-identical result sets.
        self.assertEqual(len(set(res_list)), 1, '执行失败' + text)

    def tearDown(self):
        """Step 9: restart the cluster if unhealthy, then drop the table."""
        text = '--------step9: 清理环境; expect:成功--------'
        logger.info(text)
        status_msg = primary_sh.get_db_cluster_status('status')
        logger.info(status_msg)
        if not status_msg:
            # Cluster is not healthy after the fault injection; restart it
            # so cleanup (and subsequent cases) can proceed.
            primary_sh.restart_db_cluster()
        res = primary_sh.execut_db_sql(self.prepare_data_00)
        logger.info(res)
        self.assertIn(self.constant.DROP_TABLE_SUCCESS, res, '执行失败' + text)
        logger.info(f'--------{os.path.basename(__file__)} end--------')
