"""
Case Type   : 资源池化集群系统支持归档备份
Case Name   : 开启归档,归档过程中kill进程，数据库无影响
Create At   : 2025/04/19
Owner       : @kyleze
Description :
    1.开启归档
    2.执行tpcc业务
    3.执行pg_switch_xlog
    4.生成归档过程中kill进程
    5.等待集群恢复
    6.执行pg_switch_xlog
    7.查看集群状态，查看归档文件
Expect      :
    1.成功
    2.成功
    3.成功
    4.成功
    5.集群恢复Normal
    6.成功
    7.集群状态为Normal，归档文件生成
History     :
"""

import os
import unittest
import time

from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.ComThread import ComThread

from yat.test import Node
from yat.test import macro


@unittest.skipIf(
    "dms_res" not in Common().get_sh_result(
        Node("PrimaryDbUser"),
        f"source {macro.DB_ENV_PATH};cm_ctl query -Cvidp"), "非资源池化不执行")
class SharedStorageAchiveXlog(unittest.TestCase):
    """Shared-storage (resource-pooled) cluster WAL-archiving test.

    Enables archiving, runs tpcc load, kills the gaussdb processes while
    an archive (pg_switch_xlog) is in progress, then verifies the cluster
    recovers and archive files are generated.  Skipped entirely on
    non-resource-pooled deployments (no "dms_res" in cm_ctl output).
    """

    def setUp(self):
        """Prepare helpers, the archive directory and remember GUC defaults."""
        self.logger = Logger()
        self.logger.info(f"----{os.path.basename(__file__)} start----")
        self.constant = Constant()
        self.com = Common()
        self.primary_sh = CommonSH("PrimaryDbUser")
        self.primary_node = Node("PrimaryDbUser")
        # Dedicated archive target; removed again in tearDown.
        self.archive_dest = os.path.join(macro.DB_BACKUP_PATH,
                                         "archive_dir_032/")
        # Capture current GUC values so tearDown can restore them.
        self.default_archive_mode = \
            self.com.show_param("archive_mode")
        self.default_archive_dest = \
            self.com.show_param("archive_dest")

    def test_archive_xlog(self):
        step = "--step1:开启归档 expect:成功--"
        self.logger.info(step)
        guc_res = self.primary_sh.execute_gsguc(
            "reload", self.constant.GSGUC_SUCCESS_MSG,
            "archive_mode=on")
        self.logger.info(guc_res)
        self.assertTrue(guc_res, "执行失败:" + step)
        guc_res = self.primary_sh.execute_gsguc(
            "reload", self.constant.GSGUC_SUCCESS_MSG,
            f"archive_dest='{self.archive_dest}'")
        self.logger.info(guc_res)
        self.assertTrue(guc_res, "执行失败:" + step)

        step = "--step2:执行tpcc业务 expect:成功--"
        self.logger.info(step)
        # Run tpcc in a daemon thread so the load keeps going while we
        # switch xlog and kill processes below.
        tpcc_thread = ComThread(
            self.com.start_tpcc, args=(self.primary_node, macro.TPCC_PATH))
        tpcc_thread.setDaemon(True)
        tpcc_thread.start()
        time.sleep(20)

        step = "--step3:执行pg_switch_xlog expect:成功--"
        self.logger.info(step)
        # Trigger an xlog switch asynchronously; the kill in step 4 must
        # land while the resulting archive is still in progress.
        archive_thread = ComThread(
            self.primary_sh.execut_db_sql, args=("select pg_switch_xlog();",))
        archive_thread.setDaemon(True)
        archive_thread.start()
        time.sleep(1)

        step = "--step4:生成归档过程中kill进程 expect:成功--"
        self.logger.info(step)
        # '\-D' keeps grep from treating -D as an option; raw string avoids
        # the invalid "\-" escape warning in the Python literal.
        kill_cmd = r"ps ux | grep '\-D' | grep gaussdb| grep -v grep " \
                   "| awk '{print $2}' | xargs kill -9"
        kill_res = self.primary_node.sh(kill_cmd).result()
        self.logger.info(kill_res)
        self.assertEqual("", kill_res, "执行失败" + step)

        step = "--step5:等待集群恢复 expect:集群恢复Normal--"
        self.logger.info(step)
        # Poll up to 60 times (~2 min) for the cluster to come back.
        for i in range(60):
            self.logger.info("----第" + str(i + 1) + "次查询集群状态----")
            time.sleep(2)
            cluster_detail = self.primary_sh.get_db_cluster_status("detail")
            self.logger.info(cluster_detail)
            if "Normal" in cluster_detail and "Primary Normal" in \
                    cluster_detail:
                break
            elif i == 59:
                # On the final attempt accept Degraded or a normal primary;
                # otherwise fail the step.  (The original expression
                # `"Degraded" or ...` was always truthy and never failed.)
                cluster_res = ("Degraded" in cluster_detail
                               or "Primary Normal" in cluster_detail)
                self.assertTrue(cluster_res, "执行失败" + step)

        step = "--step6:执行pg_switch_xlog expect:成功--"
        self.logger.info(step)
        archive_res = self.primary_sh.execut_db_sql(
            "select pg_switch_xlog();select pg_switch_xlog();")
        self.logger.info(archive_res)
        self.assertIn("pg_switch_xlog", archive_res, "执行失败" + step)

        step = "--step7:查看集群状态 expect:集群状态为Normal，归档文件生成--"
        self.logger.info(step)
        cluster_detail = self.primary_sh.get_db_cluster_status("detail")
        self.logger.info(cluster_detail)
        self.assertIn("Primary Normal", cluster_detail, "执行失败" + step)
        # Archive files must exist in the configured destination.
        ls_cmd = f"ls {self.archive_dest};"
        self.logger.info(ls_cmd)
        ls_archive = self.primary_node.sh(ls_cmd).result()
        self.logger.info(ls_archive)
        archive_bool = 'No such file or directory' not in ls_archive and len(
            ls_archive.split()) > 0
        self.assertTrue(archive_bool, "执行失败:" + step)

    def tearDown(self):
        """Remove the archive directory and restore the original GUC values."""
        self.logger.info("----清理环境----")
        rm_cmd = f"rm -rf {self.archive_dest};"
        self.logger.info(rm_cmd)
        rm_msg = self.primary_node.sh(rm_cmd).result()
        self.logger.info(rm_msg)
        guc_res1 = self.primary_sh.execute_gsguc(
            "reload", self.constant.GSGUC_SUCCESS_MSG,
            f"archive_mode={self.default_archive_mode}")
        self.logger.info(guc_res1)
        guc_res2 = self.primary_sh.execute_gsguc(
            "reload", self.constant.GSGUC_SUCCESS_MSG,
            f"archive_dest='{self.default_archive_dest}'")
        self.logger.info(guc_res2)
        # Assert only after all cleanup commands have been attempted, so one
        # failure does not prevent the remaining restore steps from running.
        self.assertTrue(guc_res1, "设置参数失败")
        self.assertTrue(guc_res2, "设置参数失败")
        self.assertEqual("", rm_msg, "清理目录失败")
        self.logger.info(f"----{os.path.basename(__file__)} end----")

