"""
Case Type   : 硬件故障类--CPU
Case Name   : 数据库减容/扩容时备节点CPU占用率100%
Create At   : 2021/07/12
Owner       : 5328126
Description :
    1.备节点注入CPU占用率100%
    2.1主2备，减1备  期望:减容成功
    3.检查集群中节点个数
    4.执行TPCC，并检查数据一致性
    5.备节点注入CPU占用率100%
    6.将减容节点还原
    7.检查集群中节点个数
    8.执行TPCC，并检查数据一致性
Expect      :
    1.成功
    2.成功
    3.节点数变为2
    4.执行成功，主备一致
    5.成功
    6.成功
    7.节点数变为3
    8.执行成功，主备一致
History     :
    Modified by 5328126 2021/8/3:扩容后需手动修改synchronous_standby_names
    Modified by 1115623 2023/2/16:修改注入CPU占用率故障断言信息
    Modified by 1205145 2023/10/31:扩容前重建root互信
"""

import unittest
import time
import os
from yat.test import Node, macro
from testcase.utils.Common import Common
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
from testcase.utils.CommonSH import RestartDbCluster
from testcase.utils.Logger import Logger


class CPUOverloadAndRunTPCC(unittest.TestCase):
    """Hardware-fault case: CPU overload during cluster shrink/expand.

    Topology: 1 primary + 2 standbys. A 100% CPU-usage fault is injected
    on standby 1, then standby 2 is dropped (gs_dropnode) and later
    re-added (expansion). After each resize the test verifies the
    wal-sender count on the primary, runs TPCC and checks primary/standby
    data consistency. tearDown restores the cluster size and the saved
    synchronous_standby_names of every node.
    """

    # Node aliases restarted by the RestartDbCluster decorator before setUp.
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        self.log = Logger()
        self.log.info(f"-----{os.path.basename(__file__)} start-----")
        # SSH handles: root for fault injection / expansion, db user for SQL.
        self.primary_root_node = Node(node='PrimaryRoot')
        self.sta1_root_node = Node(node='Standby1Root')
        self.primary_user_node = Node(node='PrimaryDbUser')
        self.sta1_dbuser = Node(node='Standby1DbUser')
        self.sta2_dbuser = Node(node='Standby2DbUser')
        self.com = Common()
        self.constant = Constant()
        self.primary_sh = CommonSH('PrimaryDbUser')
        self.priroot_sh = CommonSH('PrimaryRoot')
        self.sta2root_sh = CommonSH('Standby2Root')
        self.sta_1_sh = CommonSH('Standby1DbUser')
        self.sta_2_sh = CommonSH('Standby2DbUser')

        # Save synchronous_standby_names of all three nodes so tearDown can
        # restore them after the shrink/expand cycle rewrites the setting.
        self.synchronous_standby_names_p = self._fetch_sync_names(
            self.primary_sh)
        self.synchronous_standby_names_s = self._fetch_sync_names(
            self.sta_2_sh)
        self.synchronous_standby_names_s1 = self._fetch_sync_names(
            self.sta_1_sh)
        self.host_tuple = (self.primary_user_node.ssh_host,
                           self.sta1_dbuser.ssh_host, self.sta2_dbuser.ssh_host)
        self.params = {'-f': 'test_hosts'}

    def _fetch_sync_names(self, common_sh):
        """Return the current synchronous_standby_names value via *common_sh*.

        The value sits on the second-to-last line of the SQL client output.
        """
        result = common_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        return result.strip().splitlines()[-2]

    def _inject_cpu_overload(self, duration):
        """Inject 100% CPU usage on every core of standby 1 for *duration* s.

        If the first injection does not report success, it is retried once
        and the retry result is asserted.
        """
        shell_cmd = "cat /proc/cpuinfo |grep 'processor'|wc -l"
        self.log.info(shell_cmd)
        cpu_num = self.sta1_root_node.sh(shell_cmd).result()
        self.log.info(cpu_num)
        # Cover cores 0 .. cpu_num-1 at 100% usage for *duration* seconds.
        inject_command = f"rCPU_Overloadau (cpuid1, cpuid2, usage, time) " \
            f"values(0, {int(cpu_num) - 1}, 100, {duration})"
        result = self.com.cfe_inject(self.sta1_root_node, inject_command)
        self.log.info(result)
        if self.constant.cfe_inject_rcpu_overloadau_success_msg not in result:
            res = self.com.cfe_inject(self.sta1_root_node, inject_command)
            self.log.info(res)
            self.assertIn(self.constant.cfe_inject_rcpu_overloadau_success_msg,
                          res.strip())

    def _check_wal_sender_count(self, expected):
        """Assert the primary reports *expected* (str) wal senders, i.e. the
        number of connected standbys."""
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        self.assertEqual(expected, result.splitlines()[-2].strip())

    def _run_tpcc_and_check(self, standby_shs, check_nodes):
        """Run TPCC on the primary, then verify replication sync for every
        CommonSH in *standby_shs* and per-table data consistency across
        *check_nodes*."""
        result = self.com.startTPCC(self.primary_root_node, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.constant.TPCC_SUCCESS_MSG, result)

        self.log.info('----检查主备是否同步----')
        time.sleep(10)
        for standby_sh in standby_shs:
            self.assertTrue(standby_sh.check_data_consistency())

        self.log.info('----检查数据一致性----')
        self.assertTrue(
            self.com.check_data_sample_by_all(r'\d', *check_nodes))

        # Compare row counts of every user table across all checked nodes.
        table_dict = self.com.format_sql_result(
            self.primary_sh.executDbSql(r'\d'))
        for name in table_dict.get('Name'):
            select_sql = f'select count(*) from {name};'
            self.assertTrue(
                self.com.check_data_sample_by_all(select_sql, *check_nodes))

    def test_main(self):
        self.log.info("--------------1.备节点注入CPU占用率100%-------------")
        self._inject_cpu_overload(60)

        self.log.info("-------------2.减容-----------------")
        execute_cmd = f'''source {macro.DB_ENV_PATH};
                            expect <<EOF
                            set timeout 120
                            spawn gs_dropnode -U \
                            {self.primary_user_node.ssh_user} \
                            -G {self.primary_user_node.ssh_user} \
                            -h {self.sta2_dbuser.ssh_host}
                            expect "*drop the target node (yes/no)?*"
                            send "yes\\n"
                            expect eof\n''' + '''EOF'''
        self.log.info(execute_cmd)
        result = self.primary_user_node.sh(execute_cmd).result()
        self.log.info(result)
        self.assertIn("Success to drop the target nodes", result)

        self.log.info("------------3.检查集群中节点个数-----------------")
        self._check_wal_sender_count("1")

        self.log.info("-------------4.运行TPCC---------------")
        self._run_tpcc_and_check(
            (self.sta_1_sh,),
            (self.primary_user_node, self.sta1_dbuser))
        # Let the 60 s CPU-overload injection expire before re-injecting.
        time.sleep(60)

        self.log.info("--------------5.备节点注入CPU占用率100%-------------")
        self._inject_cpu_overload(300)

        self.log.info("---------------6.扩容-------------------")
        text = '---预置步骤: 数据库主机解压安装包，以免找不到扩容脚本 expect: 成功---'
        self.log.info(text)
        cmd = f'cd {os.path.dirname(macro.DB_SCRIPT_PATH)} && ' \
            f'tar -xf openGauss-Package-bak*.tar.gz && ' \
            f'ls {macro.DB_SCRIPT_PATH}|grep gs_sshexkey'
        self.log.info(cmd)
        res = self.com.get_sh_result(self.priroot_sh.node, cmd)
        self.assertEqual(res, 'gs_sshexkey', f'执行失败: {text}')
        # Rebuild root SSH mutual trust before the expansion (2023/10/31
        # history note).
        self.sta2root_sh.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,
                                          *self.host_tuple,
                                          **self.params)
        result = self.priroot_sh.exec_expension(
            self.primary_user_node.ssh_user, self.primary_user_node.ssh_user,
            self.sta2_dbuser.ssh_host, macro.DB_XML_PATH)
        self.assertTrue(result)

        self.log.info("------------7.检查集群中节点个数-----------------")
        self._check_wal_sender_count("2")

        self.log.info("-------------8.运行TPCC---------------")
        self._run_tpcc_and_check(
            (self.sta_1_sh, self.sta_2_sh),
            (self.primary_user_node, self.sta1_dbuser, self.sta2_dbuser))

    def tearDown(self):
        self.log.info("-------------还原集群---------------")
        # Wait out the 300 s CPU-overload injection from step 5 before
        # touching the cluster.
        time.sleep(300)
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        if "2" != result.splitlines()[-2].strip():
            # Expansion did not complete: re-add the dropped standby so the
            # cluster is back to 1 primary + 2 standbys.
            self.sta2root_sh.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,
                                              *self.host_tuple,
                                              **self.params)
            self.priroot_sh.exec_expension(
                self.primary_user_node.ssh_user,
                self.primary_user_node.ssh_user,
                self.sta2_dbuser.ssh_host, macro.DB_XML_PATH)

        self.log.info("----------还原synchronous_standby_names-------------")
        p_hostname = self.primary_root_node.sh("hostname").result()
        self.log.info(p_hostname)
        self.primary_sh.executeGsguc('reload',
                                     self.constant.GSGUC_SUCCESS_MSG,
                                     f"synchronous_standby_names="
                                     f"'{self.synchronous_standby_names_p}'",
                                     node_name=p_hostname)
        s_hostname = self.sta2_dbuser.sh("hostname").result()
        # Bug fix: log the hostname just fetched (the original logged
        # p_hostname here a second time).
        self.log.info(s_hostname)
        self.sta_2_sh.executeGsguc('reload',
                                   self.constant.GSGUC_SUCCESS_MSG,
                                   f"synchronous_standby_names="
                                   f"'{self.synchronous_standby_names_s}'",
                                   node_name=s_hostname)
        s_hostname = self.sta1_dbuser.sh("hostname").result()
        self.log.info(s_hostname)
        self.sta_1_sh.executeGsguc('reload',
                                   self.constant.GSGUC_SUCCESS_MSG,
                                   f"synchronous_standby_names="
                                   f"'{self.synchronous_standby_names_s1}'",
                                   node_name=s_hostname)
        self.log.info(f"-----{os.path.basename(__file__)} end-----")
