"""
Case Type   : 资源池化-推进oldest_xmin
Case Name   : 修改ss_txnstatus_cache_size,测试数据一致性;
Create At   : 2024/4/24
Owner       : zhengxue
Description :
    1、查询ss_txnstatus_cache_size默认值,show ss_txnstatus_cache_size;
    2、设置参数;
    3、运行tpcc业务,主机配置文件primary_write.pg;
    4、备1执行一致性语句;
    5、查询集群状态;
    6、修改参数为默认值;
Expect      :
    1、显示默认值ss_txnstatus_cache_size;
    2、参数修改成功;
    3、tpcc运行正常;
    4、一致性检查正常;
    5、集群状态正常;
    6、恢复默认值成功;
History     :
"""

import os
import time
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Logger import Logger
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.ComThread import ComThread

# Shell helpers bound to the node roles declared in the YAT environment:
# the primary as the database user, standby 1 as the database user, and
# the primary as root (root is needed for process-level commands such as
# launching/killing the tpcc benchmark).
Primary_SH = CommonSH('PrimaryDbUser')
Standby1_SH = CommonSH('Standby1DbUser')
Primary_ROOTSH = CommonSH('PrimaryRoot')

class SharedStorage(unittest.TestCase):
    """Shared-storage GUC case Opengauss_Function_Guc_ForwardOldestXmin_Case0003.

    Verifies data consistency while a TPCC workload runs on the primary
    with ``ss_txnstatus_cache_size`` at (or restored to) its default:
    check/restore the GUC, disable merge/nestloop joins on standby 1,
    start TPCC on the primary, run the standard TPCC consistency queries
    on standby 1, and finally confirm the cluster state is Normal.
    """

    def setUp(self):
        """Create helpers and verify the cluster is usable before the case."""
        self.logger = Logger()
        self.com = Common()
        self.constant = Constant()
        self.primaryRoot = Node('PrimaryRoot')

        self.logger.info("===Opengauss_Function_Guc_ForwardOldestXmin_Case0003开始执行===")
        status = Primary_SH.exec_cm_ctl(mode='query', param='-Cv')
        self.logger.info(status)

        # A Degraded cluster is still acceptable as a starting point;
        # anything else (e.g. Unavailable) aborts the case up front.
        self.assertTrue("Degraded" in status or "Normal" in status)

        # GUC under test: its documented default and an alternate value.
        # NOTE(review): txnstatus_cache_size_set is currently unused by the
        # case body — kept for parity with the case description.
        self.txnstatus_cache_size_default = 131072
        self.txnstatus_cache_size_set = 262144
        self.txnstatus_cache_size = 'ss_txnstatus_cache_size'

    def test_forward_oldest_xmin(self):
        step1 = '======step1:查询ss_txnstatus_cache_size,默认值为131072;expect:查询成功======'
        self.logger.info(step1)
        status = Primary_SH.exec_cm_ctl(mode='query', param='-Cv')
        self.logger.info(status)
        show_ss_txnstatus_cache_size = Primary_SH.execut_db_sql('''show ss_txnstatus_cache_size;''')
        self.logger.info(show_ss_txnstatus_cache_size)
        if '131072' in show_ss_txnstatus_cache_size:
            # Value row is the second-to-last line of the psql-style output.
            self.assertEqual("131072", show_ss_txnstatus_cache_size.split("\n")[-2].strip())
        else:
            # GUC drifted from its default: reset it via gs_guc and restart
            # the cluster so the new value takes effect, then re-verify.
            Primary_SH.execute_gsguc(
                'set', self.constant.GSGUC_SUCCESS_MSG,
                f'{self.txnstatus_cache_size}={self.txnstatus_cache_size_default}')
            Primary_SH.restart_db_cluster()
            show_txnstatus_cache_size = self.com.show_param(self.txnstatus_cache_size)
            self.assertEqual(show_txnstatus_cache_size, '131072', 'txnstatus_cache_size恢复默认值执行失败')

        step2 = '======step2:设置参数;expect:设置成功======'
        self.logger.info(step2)
        # Force hash joins on standby 1 so the consistency queries below
        # run with a deterministic plan shape.
        set_enable_mergejoin = Standby1_SH.execut_db_sql('''set enable_mergejoin=off;''')
        self.logger.info(set_enable_mergejoin)
        self.assertIn("SET", set_enable_mergejoin, '执行失败' + step2)
        set_enable_nestloop = Standby1_SH.execut_db_sql('''set enable_nestloop=off;''')
        self.logger.info(set_enable_nestloop)
        self.assertIn("SET", set_enable_nestloop, '执行失败' + step2)

        step3 = '======step3:运行tpcc业务;expect:tpcc进程运行======'
        self.logger.info(step3)
        # Launch TPCC on the primary in a daemon thread so it keeps
        # writing while the consistency checks run on the standby.
        primary_tpcc = ComThread(self.com.start_tpcc,
                                 args=(self.primaryRoot, '/data/benchmarksql-5.0/run',
                                       './runBenchmark.sh primary_write.pg'))
        # Attribute form: Thread.setDaemon() is deprecated (removed in 3.13).
        primary_tpcc.daemon = True
        primary_tpcc.start()
        status = Primary_ROOTSH.exec_cmd_under_root("ps -ux | grep './runBenchmark.sh' | grep -v -- '--color'")
        self.assertIn('primary_write.pg', status, '执行失败' + step3)

        step4 = "======step4:备节点1执行一致性语句;expect:主备一致性正常======"
        self.logger.info(step4)
        # Standard TPCC consistency conditions 1-4 plus the order-line
        # count check; each query must return an empty result ('0 rows').
        consistence_res1 = Standby1_SH.execut_db_sql('''(Select w_id, w_ytd from bmsql_warehouse) 
                                                     except(select d_w_id, sum(d_ytd) from bmsql_district group by d_w_id);''')
        self.logger.info(consistence_res1)
        self.assertIn('0 rows', consistence_res1.splitlines()[-1], '执行失败' + step4)

        consistence_res2 = Standby1_SH.execut_db_sql('''(Select d_w_id, d_id, D_NEXT_O_ID - 1 from bmsql_district) except 
                                                     (select o_w_id, o_d_id, max(o_id) from bmsql_oorder group by o_w_id, o_d_id);''')
        self.logger.info(consistence_res2)
        self.assertIn('0 rows', consistence_res2.splitlines()[-1], '执行失败' + step4)

        consistence_res3 = Standby1_SH.execut_db_sql('''(Select d_w_id, d_id, D_NEXT_O_ID - 1 from bmsql_district) except 
                                                     (select no_w_id, no_d_id, max(no_o_id) from bmsql_new_order group by no_w_id, no_d_id);''')
        self.logger.info(consistence_res3)
        self.assertIn('0 rows', consistence_res3.splitlines()[-1], '执行失败' + step4)

        consistence_res4 = Standby1_SH.execut_db_sql('''
                                                     select * from (select (count(no_o_id)-(max(no_o_id)-min(no_o_id)+1)) as 
                                                     diff from bmsql_new_order group by no_w_id, no_d_id) where diff != 0;''')
        self.logger.info(consistence_res4)
        self.assertIn('0 rows', consistence_res4.splitlines()[-1], '执行失败' + step4)

        consistence_res5 = Standby1_SH.execut_db_sql('''(select o_w_id, o_d_id, sum(o_ol_cnt) from bmsql_oorder  
                                                     group by o_w_id, o_d_id) except (select ol_w_id, ol_d_id, count(ol_o_id) from bmsql_order_line 
                                                     group by ol_w_id, ol_d_id);''')
        self.logger.info(consistence_res5)
        self.assertIn('0 rows', consistence_res5.splitlines()[-1], '执行失败' + step4)
        # Let the TPCC workload run a little longer before the final
        # cluster-state check.
        time.sleep(20)

        step5 = "======step5:tpcc业务结束,查询集群状态;expect:集群状态正常======"
        self.logger.info(step5)
        status = Primary_SH.exec_cm_ctl(mode='query', param='-Cv')
        self.logger.info(status)
        self.assertIn('cluster_state   : Normal', status, '执行失败' + step5)
        # Node-state line layout: '|'-separated node columns, each keyed by
        # its node id (6001 = primary, 6002 = first standby).
        self.assertIn('P Primary Normal',
                      status.splitlines()[-1].split('|')[0].split('6001')[-1],
                      '执行失败' + step5)
        self.assertIn('S Standby Normal',
                      status.splitlines()[-1].split('|')[1].split('6002')[-1],
                      '执行失败' + step5)

    def tearDown(self):
        """Stop TPCC, restore planner params, and verify the cluster recovered."""
        step6 = "======step6:环境清理,恢复参数;expect:环境清理成功,恢复参数成功======"
        self.logger.info(step6)
        status = Primary_SH.exec_cm_ctl(mode='query', param='-Cv')
        self.logger.info(status)
        # Wait (up to 600 ticks) for any core-dump/abnormal state to clear.
        status = Primary_SH.check_cluster_core_status(status, wait_times=600)
        self.logger.info(status)
        # NOTE(review): the doubled braces look like an f-string leftover,
        # but awk appears to treat '{{...}}' as a nested block, so the
        # command still prints column 2 — kept byte-identical.
        stop_tpcc_cmd = "ps -ef | grep runBenchmark | grep -v grep | " \
                        + "awk '{{print $2}}' | xargs kill -9"
        Primary_ROOTSH.exec_cmd_under_root(stop_tpcc_cmd)
        set_enable_mergejoin = Primary_SH.execut_db_sql('''set enable_mergejoin=on;''')
        self.logger.info(set_enable_mergejoin)
        self.assertIn("SET", set_enable_mergejoin, '执行失败' + step6)
        set_enable_nestloop = Primary_SH.execut_db_sql('''set enable_nestloop=on;''')
        self.logger.info(set_enable_nestloop)
        self.assertIn("SET", set_enable_nestloop, '执行失败' + step6)

        # Restart, then switch every node back to its original role (-a).
        Primary_SH.restart_db_cluster()
        recovery = Primary_SH.exec_cm_ctl(mode='switchover', param='-a')
        self.logger.info(recovery)
        self.assertIn(self.constant.cm_switchover_success_msg, recovery,
                      '执行失败' + step6)

        status = Primary_SH.exec_cm_ctl(mode='query', param='-Cv')
        self.logger.info(status)
        self.assertIn('cluster_state   : Normal', status, '执行失败' + step6)
        self.assertIn('P Primary Normal',
                      status.splitlines()[-1].split('|')[0].split('6001')[-1],
                      '执行失败' + step6)
        self.assertIn('S Standby Normal',
                      status.splitlines()[-1].split('|')[1].split('6002')[-1],
                      '执行失败' + step6)
        self.assertIn('S Standby Normal',
                      status.splitlines()[-1].split('|')[2].split('6003')[-1],
                      '执行失败' + step6)

        self.logger.info(f"-----{os.path.basename(__file__)} end-----")

