"""
Case Type   : 资源池化集群系统支持归档备份
Case Name   : 开启归档，使用归档日志恢复到recovery_target_xid
Create At   : 2025/04/19
Owner       : @kyleze
Description :
    1.开启归档
    2.创建基础表，插入数据
    3.执行全量备份
    4.多次创建表，插入数据，并记录xid
    5.执行pg_switch_xlog
    6.使用归档日志恢复到recovery_target_xid
    7.查询恢复后的数据
Expect      :
    1.成功
    2.成功
    3.成功
    4.成功
    5.成功
    6.恢复成功
    7.恢复后的数据与xid点前的数据一致
History     :
"""

import os
import unittest

from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH

from yat.test import Node
from yat.test import macro


@unittest.skipIf(
    "dms_res" not in Common().get_sh_result(
        Node("PrimaryDbUser"),
        f"source {macro.DB_ENV_PATH};cm_ctl query -Cvidp"), "非资源池化不执行")
class SharedStorageAchiveXlog(unittest.TestCase):
    """Shared-storage (resource-pooling) cluster archive test.

    Enables WAL archiving, takes a full gs_probackup backup, records
    transaction ids (xids) across three table creations, then restores
    the cluster from the backup plus archived xlogs up to
    ``recovery_target_xid`` (xid2, exclusive) and verifies that only the
    data committed before that xid is visible afterwards.

    Skipped entirely when the cluster is not a resource-pooling ("dms_res")
    deployment.
    """

    def setUp(self):
        """Prepare loggers, helpers, paths and sanity-check cluster state."""
        self.logger = Logger()
        self.logger.info(f"----{os.path.basename(__file__)} start----")
        self.constant = Constant()
        self.com = Common()
        self.st2_com = Common("Standby2DbUser")
        self.primary_sh = CommonSH("PrimaryDbUser")
        self.primary_node = Node("PrimaryDbUser")
        # Base table (pre-backup) plus three tables created after the
        # full backup; table 3 must NOT exist after the targeted restore.
        self.table_name = "t_archive_xlog_test019"
        self.table_name_1 = "t_archive_xlog_test019_01"
        self.table_name_2 = "t_archive_xlog_test019_02"
        self.table_name_3 = "t_archive_xlog_test019_03"
        self.archive_dest = os.path.join(macro.DB_BACKUP_PATH,
                                         "archive_dir_019/")
        # Remember current GUC values so tearDown can restore them.
        self.default_archive_mode = \
            self.com.show_param("archive_mode")
        self.default_archive_dest = \
            self.com.show_param("archive_dest")
        self.parent_path = os.path.dirname(macro.DB_INSTANCE_PATH)
        self.backup_path = os.path.join(self.parent_path,
                                        "backupset/backup_dir_019")
        self.instance_name = 'probackup_019'
        self.primary_script = "sharestorage_probackup_primary_restore.sh"
        self.target_path = os.path.join(macro.DB_BACKUP_PATH, 'backup_dir_019')
        self.recover_conf = \
            os.path.join(macro.DB_INSTANCE_PATH, 'recovery.conf')
        self.logger.info("-----检查数据库状态是否正常-----")
        status = self.primary_sh.get_db_cluster_status()
        self.assertTrue("Degraded" in status or "Normal" in status)
        # Build the "+vg1,+vg2" volume-group string gs_probackup needs
        # from the first two entries of the DSS volume-group config.
        cmd = f'source {macro.DB_ENV_PATH};' \
              f'cat $DSS_HOME/cfg/dss_vg_conf.ini |cut -d ":" -f 1'
        vgname_infor = self.primary_node.sh(cmd).result().strip()
        self.vgname = "+" + vgname_infor.split('\n')[0].strip() + \
                      ",+" + vgname_infor.split('\n')[1].strip()

    def test_archive_xlog(self):
        """End-to-end archive backup + recovery_target_xid restore."""
        step = "--step1:设置archive_mode=on expect:成功--"
        self.logger.info(step)
        guc_res = self.primary_sh.execute_gsguc(
            "reload", self.constant.GSGUC_SUCCESS_MSG,
            "archive_mode=on")
        self.logger.info(guc_res)
        self.assertTrue(guc_res, "执行失败:" + step)
        # Point archive_dest at the test-private directory (fixed: the
        # original reused the archive_mode step message here).
        step = "--step1:设置archive_dest expect:成功--"
        self.logger.info(step)
        guc_res = self.primary_sh.execute_gsguc(
            "reload", self.constant.GSGUC_SUCCESS_MSG,
            f"archive_dest='{self.archive_dest}'")
        self.logger.info(guc_res)
        self.assertTrue(guc_res, "执行失败:" + step)

        step = "--step2:创建基础表，插入数据 expect:成功--"
        self.logger.info(step)
        sql_cmd = f"drop table if exists {self.table_name};" \
                  f"create table {self.table_name}(id int,name char(20));" \
                  f"insert into {self.table_name} values " \
                  f"(generate_series(1,1000), 'test');"
        self.logger.info(sql_cmd)
        sql_res = self.primary_sh.execut_db_sql(sql_cmd)
        self.logger.info(sql_res)
        self.assertIn(self.constant.INSERT_SUCCESS_MSG, sql_res,
                      "执行失败:" + step)

        step = "--step3:执行全量备份 expect:成功--"
        self.logger.info(step)
        self.logger.info("-----初始化目录-----")
        result = self.primary_sh.exec_pro_backup_init(self.backup_path)
        self.logger.info(result)
        self.assertTrue(result, "执行失败:" + step)
        self.logger.info("-----添加备份实例-----")
        result = self.primary_sh.exec_pro_back_add(
            self.backup_path, self.instance_name,
            f"--enable-dss --vgname='{self.vgname}'")
        self.logger.info(result)
        self.assertTrue(result, "执行失败:" + step)
        self.logger.info("-----执行全量备份-----")
        result = self.primary_sh.exec_pro_backup_backup(
            self.backup_path, self.instance_name, 'full',
            f"{self.primary_node.db_name}",
            f"--vgname='{self.vgname}' -j 128", True)
        self.assertIn('completed', result, "执行失败:" + step)
        # Parse the backup id out of the "... completed" summary line;
        # the assertIn above guarantees at least one such line exists.
        self.backup_id = ''
        for backupid_msg in result.splitlines():
            if 'completed' in backupid_msg:
                self.backup_id = backupid_msg.split()[2]
        self.logger.info('备份ID为：' + self.backup_id)

        step = "--step4:多次创建表，插入数据，并记录xid expect:成功--"
        self.logger.info(step)
        # After each table's data is committed, txid_current() is
        # recorded; xid2 becomes the (exclusive) recovery target, so
        # tables 1/2 survive the restore while table 3 must not.
        self.logger.info("--第一次创建表1，记录xid1--")
        sql_cmd = f"drop table if exists {self.table_name_1};" \
                  f"create table {self.table_name_1}(id int,name char(20));" \
                  f"insert into {self.table_name_1} values " \
                  f"(generate_series(1,1000), 'test');"
        self.logger.info(sql_cmd)
        sql_res = self.primary_sh.execut_db_sql(sql_cmd)
        self.logger.info(sql_res)
        self.assertIn(self.constant.INSERT_SUCCESS_MSG, sql_res,
                      "执行失败:" + step)
        xid1_res = self.primary_sh.execut_db_sql("select txid_current();")
        self.logger.info(xid1_res)
        # Row value lives on the third line of the psql-style output.
        target_xid1 = xid1_res.splitlines()[2].strip()
        self.logger.info("xid1为：" + target_xid1)

        self.logger.info("--第二次创建表2，记录xid2--")
        sql_cmd = f"drop table if exists {self.table_name_2};" \
                  f"create table {self.table_name_2}(id int,name char(20));" \
                  f"insert into {self.table_name_2} values " \
                  f"(generate_series(1,1000), 'test');"
        self.logger.info(sql_cmd)
        sql_res = self.primary_sh.execut_db_sql(sql_cmd)
        self.logger.info(sql_res)
        self.assertIn(self.constant.INSERT_SUCCESS_MSG, sql_res,
                      "执行失败:" + step)
        xid2_res = self.primary_sh.execut_db_sql("select txid_current();")
        self.logger.info(xid2_res)
        target_xid2 = xid2_res.splitlines()[2].strip()
        self.logger.info("xid2为：" + target_xid2)

        self.logger.info("--第三次创建表3，记录xid3--")
        sql_cmd = f"drop table if exists {self.table_name_3};" \
                  f"create table {self.table_name_3}(id int,name char(20));" \
                  f"insert into {self.table_name_3} values " \
                  f"(generate_series(1,1000), 'test');"
        self.logger.info(sql_cmd)
        sql_res = self.primary_sh.execut_db_sql(sql_cmd)
        self.logger.info(sql_res)
        self.assertIn(self.constant.INSERT_SUCCESS_MSG, sql_res,
                      "执行失败:" + step)
        xid3_res = self.primary_sh.execut_db_sql("select txid_current();")
        self.logger.info(xid3_res)
        target_xid3 = xid3_res.splitlines()[2].strip()
        self.logger.info("xid3为：" + target_xid3)

        step = "--step5:执行pg_switch_xlog expect:成功--"
        self.logger.info(step)
        # Force the current xlog segment out so it gets archived.
        archive_res = self.primary_sh.execut_db_sql("select pg_switch_xlog();")
        self.logger.info(archive_res)
        self.assertIn("1 row", archive_res, "执行失败:" + step)

        # Fixed: the original message wrongly said recovery_target_name.
        step = "--step6:使用归档日志恢复到recovery_target_xid expect:成功--"
        self.logger.info(step)
        result = self.primary_sh.exec_cm_ctl('stop')
        self.logger.info(result)
        self.assertIn(self.constant.cm_stop_success_msg, result,
                      "停止集群失败")
        self.logger.info("-----执行全量恢复-----")
        # The helper script (copied from standby 2) brings dssserver up
        # so gs_probackup can write into the DSS volume groups.
        self.st2_com.scp_file(self.primary_node, self.primary_script,
                              self.target_path)
        cmd = f'source {macro.DB_ENV_PATH};cd {self.target_path};' \
              f'sh sharestorage_probackup_primary_restore.sh'
        result = self.primary_node.sh(cmd).result()
        self.logger.info(result)
        self.assertIn('successfully start dssserver', result, "dss启动失败")
        result = self.primary_sh.exec_pro_backup_restore(
            self.backup_path, self.instance_name, self.backup_id,
            restore_cmd=f'-D {macro.DB_INSTANCE_PATH} -j 128')
        self.logger.info(result)
        self.assertTrue(result, "主机执行恢复失败")
        self.logger.info('----停止dssserver----')
        stop_dss_cmd = f"source {macro.DB_ENV_PATH};dsscmd stopdss;"
        res = self.com.get_sh_result(self.primary_node, stop_dss_cmd)
        self.logger.info(res)
        self.assertIn('Succeed to stop server.', res, "dss停止失败")
        self.logger.info("----配置recovery.conf文件----")
        # recovery_target_inclusive=false stops replay just BEFORE xid2,
        # so table 2's data (committed before xid2 was taken) is kept.
        cmd = f"touch " \
              f"{os.path.join(macro.DB_INSTANCE_PATH, 'recovery.conf')};" \
              f"echo \"restore_command = 'dsscmd cp -s " \
              f"{self.archive_dest}%f -d %p'\" > " \
              f"{self.recover_conf};" \
              f"echo \"recovery_target_xid = '{target_xid2}'\" >> " \
              f"{self.recover_conf};" \
              f"echo \"recovery_target_inclusive = false\" >> " \
              f"{self.recover_conf};" \
              f"echo \"pause_at_recovery_target = false\" >> " \
              f"{self.recover_conf};" \
              f"cat {self.recover_conf}"
        self.logger.info(cmd)
        result = self.primary_node.sh(cmd).result()
        self.logger.info(result)
        self.logger.info("----启动集群----")
        start_res = self.primary_sh.exec_cm_ctl('start')
        self.logger.info(start_res)
        self.assertIn(self.constant.cm_start_success_msg, start_res,
                      "启动集群失败")

        step = "--step7:查询恢复后的数据 expect:恢复后的数据与xid节点前的数据一致--"
        self.logger.info(step)
        sql_cmd = f"select count(*) from {self.table_name};" \
                  f"select count(*) from {self.table_name_1};" \
                  f"select count(*) from {self.table_name_2};" \
                  f"select count(*) from {self.table_name_3};"
        self.logger.info(sql_cmd)
        sql_res = self.primary_sh.execut_db_sql(sql_cmd)
        self.logger.info(sql_res)
        # Base table + tables 1 and 2 each report 1000 rows; table 3 was
        # created after the recovery target and must not exist.
        self.assertEqual(3, sql_res.count("1000"), "执行失败:" + step)
        self.assertIn(
            'ERROR:  relation "t_archive_xlog_test019_03" does not exist',
            sql_res, "执行失败:" + step)

    def tearDown(self):
        """Remove artifacts, drop test tables and restore archive GUCs."""
        self.logger.info("----清理环境----")
        rm_cmd = f"rm -rf {self.archive_dest};" \
                 f"rm -rf {self.backup_path};" \
                 f"rm -rf {self.recover_conf}"
        self.logger.info(rm_cmd)
        rm_msg = self.primary_node.sh(rm_cmd).result()
        # Fixed: table 3 is now dropped as well — if the test aborts
        # before the restore replays past xid2, it would otherwise leak.
        drop_res = self.primary_sh.execut_db_sql(
            f"drop table if exists {self.table_name},{self.table_name_1},"
            f"{self.table_name_2},{self.table_name_3};")
        self.logger.info(drop_res)
        guc_res1 = self.primary_sh.execute_gsguc(
            "reload", self.constant.GSGUC_SUCCESS_MSG,
            f"archive_mode={self.default_archive_mode}")
        self.logger.info(guc_res1)
        guc_res2 = self.primary_sh.execute_gsguc(
            "reload", self.constant.GSGUC_SUCCESS_MSG,
            f"archive_dest='{self.default_archive_dest}'")
        self.logger.info(guc_res2)
        # Assertions deferred to the end so every cleanup step runs first.
        self.assertTrue(drop_res, "删除表失败")
        self.assertEqual("", rm_msg, "删除文件失败")
        self.logger.info("----恢复集群----")
        self.logger.info(f'----{os.path.basename(__file__)} end----')
