"""
Case Type   : 资源池化双集群
Case Name   : 资源池化双集群主集群主节点读写、主集群备节点空载、备集群空载下, 集群间 failover 操作
Create At   : 2025-05-15
Owner       : hejiahuan
Description :
    1. 查看主备集群状态 cm_ctl query -Cv
    2. 主节点执行tpcc读写业务, 备节点空载，备集群空载
    3. 首备节点执行并查看集群状态 gs_ddr -t failover, cm_ctl query -Cv
    4. 恢复集群为初始状态 
    主集群主节点执行 gs_ddr -t stop
    主节点和原首备节点分别执行 gs_ddr -t start重新搭建即可
Expect      :
    1. 集群状态正常
    2. 业务执行成功
    3. 成功,集群状态符合预期
    4. 成功
History     :
    1. 2025-05-15 创建
"""
import os
import unittest
import time
from testcase.utils.Logger import Logger
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.ComThread import ComThread
from testcase.utils.Common import Common
from testcase.utils.Dual_Cluster_CommonSH import DC_CommonSH
from yat.test import Node
from yat.test import macro
# Module-level logger plus shell helpers, created at import time because the
# skipIf decorator on the test class needs P_Primary_SH before any test runs.
log = Logger()
# Shell helper bound to the primary cluster's primary node.
P_Primary_SH = CommonSH('P_PrimaryDbUser')
# Shell helper bound to the standby (disaster-recovery) cluster's primary node.
S_Primary_SH = CommonSH('S_PrimaryDbUser')

# Skip unless this is a dual-cluster (shared-storage DR) environment:
# ss_double_cluster_mode == 1 marks the primary cluster of a dorado dual
# cluster.  NOTE: the original code compared the int 1 with the stripped
# *string* value, which is always False, so the case was never skipped; the
# comparison is now done against '1' with the polarity matching the skip
# reason ("do not run in a non-dual-cluster environment").
@unittest.skipIf(
    '1' != P_Primary_SH.exec_cm_ctl(
        'list',
        '--param --server | grep ss_double_cluster_mode').split('=')[-1].strip(),
    '非双集群环境下不执行')
class Dual_Cluster_NormalCase_Case0021(unittest.TestCase):
    """
    Dual-cluster failover scenario: the primary cluster's primary node runs a
    TPCC read/write workload while the primary-cluster standby node and the
    whole standby cluster stay idle; the standby cluster then performs
    ``gs_ddr -t failover``.  tearDown restores the original topology by
    stopping DR on the old primary cluster and re-running ``gs_ddr -t start``
    on both clusters.
    """

    def setUp(self):
        # Collect helpers and node handles used by the steps; no cluster
        # state is changed here.
        log.info(f'-----{os.path.basename(__file__)} start-----')
        self.constant = Constant()
        self.com = Common()
        self.DC = DC_CommonSH()
        self.P_Primary = Node('P_PrimaryRoot')
        self.p_p_node = Node('P_PrimaryDbUser')
        self.s_p_node = Node('S_PrimaryDbUser')

    def test_shared_storage(self):
        """Run TPCC on the primary node, then fail over to the standby cluster."""
        text = '-----step1:查看主备集群状态; expect:集群状态正常-----'
        log.info(text)
        # Both clusters must report Normal before the workload starts.
        status = P_Primary_SH.exec_cm_ctl(mode='query', param='-Cv')
        log.info(status)
        self.assertIn('cluster_state   : Normal', status, '执行失败' + text)
        status = S_Primary_SH.exec_cm_ctl(mode='query', param='-Cv',
                                          env=macro.DB_STANDBY_ENV_PATH)
        log.info(status)
        self.assertIn('cluster_state   : Normal', status, '执行失败' + text)
        # The disaster-recovery relationship must be intact as well.
        status = P_Primary_SH.check_data_consistency()
        self.assertTrue(status, '容灾关系断开' + text)

        text = '-----step2:主节点执行读写业务，备节点空载，备集群空载  expect:启动业务成功-----'
        log.info(text)
        # Drive TPCC against the primary node only; the worker is a daemon
        # thread so an unfinished run cannot block interpreter shutdown.
        # (Thread.setDaemon() is deprecated since Python 3.10.)
        tpcc_cmd_p = f'./runBenchmark.sh {macro.PRI_P_PROPS}'
        tpcc_thread_p = ComThread(self.com.startTPCC,
                                  args=(self.P_Primary,
                                        macro.TPCC_PATH,
                                        tpcc_cmd_p))
        tpcc_thread_p.daemon = True
        tpcc_thread_p.start()

        text = '-----step3:首备节点执行gs_ddr -t failover并查看集群状态; expect:执行成功-----'
        log.info(text)
        s_failover = ComThread(self.DC.gs_ddr_failover,
                               args=(self.s_p_node,
                                     f'{macro.DB_STANDBY_ENV_PATH}',
                                     f'{macro.DB_STANDBY_TMP}'))
        s_failover.daemon = True
        s_failover.start()
        # Allow the failover to finish before collecting its result.
        time.sleep(120)
        res = s_failover.get_result()
        log.info(res)
        self.assertIn('Successfully do dorado disaster recovery failover',
                      res, '执行失败' + text)
        status = S_Primary_SH.exec_cm_ctl(mode='query', param='-Cv',
                                          env=macro.DB_STANDBY_ENV_PATH)
        log.info(status)
        # The last line of `cm_ctl query -Cv` carries both instance states:
        # the text after '6001' is instance 1, after '6002' is instance 2.
        res1 = status.splitlines()[-1].split('|')[0].split('6001')[-1]
        res2 = status.splitlines()[-1].split('|')[1].split('6002')[-1]
        log.info(res1)
        log.info(res2)
        self.assertIn('P Primary Normal', res1, '执行失败' + text)
        self.assertIn('S Standby Normal', res2, '执行失败' + text)

    def tearDown(self):
        """Restore the initial dual-cluster topology (step 4)."""
        text = '-----step4:恢复环境为初始集群状态; expect:成功-----'
        log.info(text)
        status = S_Primary_SH.exec_cm_ctl(mode='query', param='-Cv',
                                          env=macro.DB_STANDBY_ENV_PATH)
        log.info(status)
        # Restart the (now-promoted) old standby cluster so it comes back in
        # a clean state before the switchover.
        status = S_Primary_SH.exec_cm_ctl(mode='stop',
                                          env=macro.DB_STANDBY_ENV_PATH)
        log.info(status)
        self.assertIn('successfully', status, '执行失败' + text)

        status = S_Primary_SH.exec_cm_ctl(mode='start',
                                          env=macro.DB_STANDBY_ENV_PATH)
        log.info(status)
        self.assertIn('successfully', status, '执行失败' + text)

        recovery_switchover = S_Primary_SH.exec_cm_ctl(
            mode='switchover', param='-a', env=macro.DB_STANDBY_ENV_PATH)
        log.info(recovery_switchover)
        self.assertIn(self.constant.cm_switchover_success_msg,
                      recovery_switchover, '执行失败' + text)

        # Tear down DR on the old primary cluster...
        p_stop = ComThread(self.DC.gs_ddr_stop,
                           args=(self.p_p_node, f'{macro.DB_ENV_PATH}',
                                 f'--json {macro.DUAL_CLUSTER_P_JSON_PATH}',
                                 f'{macro.DB_PRIMARY_TMP}'))
        p_stop.daemon = True
        p_stop.start()
        time.sleep(120)
        res = p_stop.get_result()
        log.info(res)

        # ...then rebuild the relationship: primary role on the primary
        # cluster, disaster_standby role on the standby cluster, in parallel.
        p_start = ComThread(self.DC.gs_ddr_start,
                            args=(self.p_p_node,
                                  f'{macro.DB_ENV_PATH}', 'primary',
                                  f'--json {macro.DUAL_CLUSTER_P_JSON_PATH}',
                                  '--disaster_type stream'))
        p_start.daemon = True
        p_start.start()
        s_start = ComThread(self.DC.gs_ddr_start,
                            args=(self.s_p_node,
                                  f'{macro.DB_STANDBY_ENV_PATH}',
                                  'disaster_standby',
                                  f'--json {macro.DUAL_CLUSTER_S_JSON_PATH}',
                                  '--disaster_type stream'))
        s_start.daemon = True
        s_start.start()
        # Bound the wait so a hung rebuild cannot stall teardown forever.
        s_start.join(60 * 5)
        res = p_start.get_result()
        log.info(res)
        res = s_start.get_result()
        log.info(res)

        status = S_Primary_SH.exec_cm_ctl(mode='query', param='-Cv',
                                          env=macro.DB_STANDBY_ENV_PATH)
        log.info(status)
        self.assertIn('P Main Standby Normal',
                      status.splitlines()[-1].split('|')[0].split('6001')[-1],
                      '执行失败' + text)
        self.assertIn('S Standby Normal',
                      status.splitlines()[-1].split('|')[1].split('6002')[-1],
                      '执行失败' + text)
        # Give replication a moment to settle before the consistency check.
        time.sleep(60)
        status = P_Primary_SH.check_data_consistency()
        self.assertTrue(status, '容灾关系断开' + text)
        log.info(f'-----{os.path.basename(__file__)} end-----')