"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.

openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:

          http://license.coscl.org.cn/MulanPSL2

THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : 硬件故障类--CPU
Case Name   : 数据库减容/扩容时主节点CPU占用率100%
Create At   : 2021/07/12
@zou_jialiang0505328126
Description :
    1.主节点注入CPU占用率100%
    2.1主2备，减1备  期望:减容成功
    3.检查集群中节点个数
    4.执行TPCC，并检查数据一致性
    5.主节点注入CPU占用率100%
    6.将减容节点还原
    7.检查集群中节点个数
    8.执行TPCC，并检查数据一致性
Expect      :
    1.成功
    2.成功
    3.节点数变为2
    4.执行成功，主备一致
    5.成功
    6.成功
    7.节点数变为3
    8.执行成功，主备一致
History     :
    Modified by @wan005 2021/8/3:扩容后需手动修改synchronous_standby_names
    Modified by @wan005 2022/4/20:扩容前重建root互信
    Modified by wx1115623 2023/2/16:修改注入CPU占用率故障断言信息
"""

import unittest
import time
import os
from yat.test import Node, macro
from testcase.utils.Common import Common
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
from testcase.utils.CommonSH import RestartDbCluster
from testcase.utils.Logger import Logger


class CPUOverloadAndRunTPCC(unittest.TestCase):
    """
    Hardware-fault case (CPU): with the primary node's CPU driven to 100%
    load, scale the cluster down (drop one standby) and back up (re-add
    it), verifying the cluster node count and TPCC / data consistency
    after each step.  See the module docstring for the full step list.
    """

    # Logical node names consumed by the @RestartDbCluster decorator,
    # which restarts the whole cluster before setUp runs.
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Build node handles and shell helpers, and snapshot each
        node's synchronous_standby_names so tearDown can restore it
        (the expansion step rewrites this GUC)."""
        self.log = Logger()
        self.log.info(f"-----{os.path.basename(__file__)} start-----")
        # Root vs. DB-user connections to the same hosts: root is needed
        # for fault injection / gs_expansion, the DB user for SQL and
        # gs_dropnode.
        self.primary_root_node = Node(node='PrimaryRoot')
        self.primary_user_node = Node(node='PrimaryDbUser')
        self.sta1_dbuser = Node(node='Standby1DbUser')
        self.sta2_dbuser = Node(node='Standby2DbUser')
        self.com = Common()
        self.constant = Constant()
        self.primary_sh = CommonSH('PrimaryDbUser')
        self.priroot_sh = CommonSH('PrimaryRoot')
        self.sta2root_sh = CommonSH('Standby2Root')
        self.sta_1_sh = CommonSH('Standby1DbUser')
        self.sta_2_sh = CommonSH('Standby2DbUser')

        # Save the current synchronous_standby_names of the primary and
        # both standbys for restoration in tearDown.  splitlines()[-2]
        # presumably picks the value row of the gsql output (last line
        # is the row-count footer) -- verify against CommonSH output.
        result = self.primary_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        self.synchronous_standby_names_p = result.strip().splitlines()[-2]
        result = self.sta_1_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        self.synchronous_standby_names_s1 = result.strip().splitlines()[-2]
        result = self.sta_2_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        self.synchronous_standby_names_s2 = result.strip().splitlines()[-2]
        # Host list and '-f test_hosts' argument for gs_sshexkey, which
        # rebuilds root SSH mutual trust before expansion.
        self.host_tuple = (self.primary_user_node.ssh_host,
                           self.sta1_dbuser.ssh_host,
                           self.sta2_dbuser.ssh_host)
        self.params = {'-f': 'test_hosts'}

    def test_main(self):
        """Run steps 1-8: CPU overload + drop node, then CPU overload
        + expansion, checking node count and data consistency."""
        self.log.info("--------------1.主节点注入CPU占用率100%-------------")
        # Count logical CPUs on the primary so the injection covers
        # cpuid 0 .. cpu_num-1 (all cores).
        shell_cmd = "cat /proc/cpuinfo |grep 'processor'|wc -l"
        self.log.info(shell_cmd)
        cpu_num = self.primary_root_node.sh(shell_cmd).result()
        self.log.info(cpu_num)
        # CFE fault injection: 100% CPU usage on all cores for 60 s.
        inject_command = f"rCPU_Overloadau (cpuid1, cpuid2, usage, time) " \
            f"values(0, {int(cpu_num)-1}, 100, 60)"
        result = self.com.cfe_inject(self.primary_root_node, inject_command)
        self.log.info(result)
        flag = self.constant.cfe_inject_rcpu_overloadau_success_msg in result
        # If the first injection did not report success, retry once and
        # assert on the retry's output.
        if not flag:
            res = self.com.cfe_inject(self.primary_root_node, inject_command)
            self.log.info(res)
            self.assertIn(self.constant.cfe_inject_rcpu_overloadau_success_msg,
                          res.strip())

        self.log.info("-------------2.减容-----------------")
        # Drop standby1 with gs_dropnode; expect(1) answers the
        # interactive "drop the target node (yes/no)?" prompt.
        execute_cmd = f'''source {macro.DB_ENV_PATH};
                            expect <<EOF
                            set timeout 120
                            spawn gs_dropnode -U \
                            {self.primary_user_node.ssh_user} \
                            -G {self.primary_user_node.ssh_user} \
                            -h {self.sta1_dbuser.ssh_host}
                            expect "*drop the target node (yes/no)?*"
                            send "yes\\n"
                            expect eof\n''' + '''EOF'''
        self.log.info(execute_cmd)
        result = self.primary_user_node.sh(execute_cmd).result()
        self.log.info(result)
        self.assertIn("Success to drop the target nodes", result)

        self.log.info("------------3.检查集群中节点个数-----------------")
        # One remaining standby => exactly one WAL sender on the primary.
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        self.assertEqual("1", result.splitlines()[-2].strip())

        self.log.info("-------------4.运行TPCC---------------")
        result = self.com.startTPCC(self.primary_root_node, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.constant.TPCC_SUCCESS_MSG, result)

        self.log.info('----检查主备是否同步----')
        # Give replication a moment to catch up before comparing.
        time.sleep(10)
        result = self.sta_2_sh.check_data_consistency()
        self.assertTrue(result)

        self.log.info('----检查数据一致性----')
        # Compare table list (\d) and per-table row counts between the
        # primary and the remaining standby (standby2).
        nodes_tuple = (self.primary_user_node, self.sta2_dbuser)
        flag = self.com.check_data_sample_by_all(r'\d', *nodes_tuple)
        self.assertTrue(flag)

        table_dict = self.com.format_sql_result(
            self.primary_sh.executDbSql(r'\d'))
        table_name = table_dict.get('Name')
        for name in table_name:
            select_sql = f'select count(*) from {name};'
            flag = self.com.check_data_sample_by_all(select_sql,
                                                     *nodes_tuple)
            self.assertTrue(flag)
        # Let the 60 s CPU-load injection from step 1 expire before the
        # next injection.
        time.sleep(60)

        self.log.info("--------------5.主节点注入CPU占用率100%-------------")
        shell_cmd = "cat /proc/cpuinfo |grep 'processor'|wc -l"
        self.log.info(shell_cmd)
        cpu_num = self.primary_root_node.sh(shell_cmd).result()
        self.log.info(cpu_num)
        # Second injection: 100% CPU usage on all cores for 300 s, so
        # the load stays on throughout the expansion.
        inject_command = f"rCPU_Overloadau (cpuid1, cpuid2, usage, time) " \
            f"values(0, {int(cpu_num) - 1}, 100, 300)"
        result = self.com.cfe_inject(self.primary_root_node, inject_command)
        self.log.info(result)
        flag = self.constant.cfe_inject_rcpu_overloadau_success_msg in result
        if not flag:
            res = self.com.cfe_inject(self.primary_root_node, inject_command)
            self.log.info(res)
            self.assertIn(self.constant.cfe_inject_rcpu_overloadau_success_msg,
                          res.strip())

        self.log.info("---------------6.扩容-------------------")
        # Rebuild root SSH mutual trust across all three hosts first
        # (required by gs_expansion), then re-add standby1.
        result = self.sta2root_sh.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,
                                              *self.host_tuple,
                                              **self.params)
        self.log.info(result)
        result = self.priroot_sh.exec_expension(
            self.primary_user_node.ssh_user, self.primary_user_node.ssh_user,
            self.sta1_dbuser.ssh_host, macro.DB_XML_PATH)
        self.assertTrue(result)

        self.log.info("------------7.检查集群中节点个数-----------------")
        # Two standbys again => two WAL senders on the primary.
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        self.assertEqual("2", result.splitlines()[-2].strip())

        self.log.info("-------------8.运行TPCC---------------")
        result = self.com.startTPCC(self.primary_root_node, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.constant.TPCC_SUCCESS_MSG, result)

        self.log.info('----检查主备是否同步----')
        time.sleep(10)
        result = self.sta_1_sh.check_data_consistency()
        self.assertTrue(result)
        result = self.sta_2_sh.check_data_consistency()
        self.assertTrue(result)

        self.log.info('----检查数据一致性----')
        # Now compare across all three nodes, including the re-added
        # standby1.
        nodes_tuple = (self.primary_user_node,
                       self.sta1_dbuser, self.sta2_dbuser)
        flag = self.com.check_data_sample_by_all(r'\d', *nodes_tuple)
        self.assertTrue(flag)

        table_dict = self.com.format_sql_result(
            self.primary_sh.executDbSql(r'\d'))
        table_name = table_dict.get('Name')
        for name in table_name:
            select_sql = f'select count(*) from {name};'
            flag = self.com.check_data_sample_by_all(select_sql,
                                                     *nodes_tuple)
            self.assertTrue(flag)

    def tearDown(self):
        """Restore the cluster: re-run expansion if standby1 is still
        missing, then put back the saved synchronous_standby_names on
        every node."""
        self.log.info("-------------还原集群---------------")
        # Wait out the 300 s CPU-load injection from step 5 before
        # touching the cluster (also runs when test_main failed early).
        time.sleep(300)
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        # Fewer than two WAL senders means standby1 was not re-added in
        # test_main -- redo trust setup and expansion here.
        if "2" != result.splitlines()[-2].strip():
            result = self.sta2root_sh.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,
                                                      *self.host_tuple,
                                                      **self.params)
            self.log.info(result)
            self.priroot_sh.exec_expension(
                self.primary_user_node.ssh_user,
                self.primary_user_node.ssh_user,
                self.sta1_dbuser.ssh_host, macro.DB_XML_PATH)
        self.log.info("----------还原synchronous_standby_names-------------")
        # gs_guc reload the saved value on each node individually
        # (expansion overwrites this GUC -- see 2021/8/3 history note).
        p_hostname = self.primary_root_node.sh("hostname").result()
        self.log.info(p_hostname)
        self.primary_sh.executeGsguc('reload',
                                     self.constant.GSGUC_SUCCESS_MSG,
                                     f"synchronous_standby_names="
                                     f"'{self.synchronous_standby_names_p}'",
                                     node_name=p_hostname)
        s_hostname = self.sta1_dbuser.sh("hostname").result()
        self.log.info(s_hostname)
        self.sta_1_sh.executeGsguc('reload',
                                   self.constant.GSGUC_SUCCESS_MSG,
                                   f"synchronous_standby_names="
                                   f"'{self.synchronous_standby_names_s1}'",
                                   node_name=s_hostname)
        s_hostname = self.sta2_dbuser.sh("hostname").result()
        self.log.info(s_hostname)
        self.sta_2_sh.executeGsguc('reload',
                                   self.constant.GSGUC_SUCCESS_MSG,
                                   f"synchronous_standby_names="
                                   f"'{self.synchronous_standby_names_s2}'",
                                   node_name=s_hostname)
        self.log.info(f"-----{os.path.basename(__file__)} end-----")
