"""
Case Type   : 硬件故障类--network
Case Name   : 数据库减容/扩容时，主节点网络错包率3%
Create At   : 2021/07/29
Owner       : 5328126
Description :
    1.主注入网络错包故障
    2.1主2备，减1备  期望:减容成功
    3.检查集群中节点个数
    4.执行TPCC，并检查数据一致性
    5.将减容节点还原
    6.检查集群中节点个数
    7.执行TPCC，并检查数据一致性
Expect      :
    1.成功
    2.成功
    3.节点数变为2
    4.执行成功，主备一致
    5.成功
    6.节点数变为3
    7.执行成功，主备一致
History     :
    2021/9/1 5328126:注入错包故障会导致ssh异常，修改用例及脚本，降低错包率
    Modified by 5328126 2022/2/27:网络错包会导致环境SSH异常，调整错包率规避该问题
    Modified by 1115623 2022/10/10:增加等待时间，等备机追赶上主机再进行一致性检查
    Modified by 1205145 2023/10/31:扩容前重建root互信
"""

import unittest
import time
import os
from yat.test import Node, macro
from testcase.utils.Common import Common
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
from testcase.utils.CommonSH import RestartDbCluster
from testcase.utils.Logger import Logger
from testcase.utils.ComThread import ComThread


class CPUOverloadAndRunTPCC(unittest.TestCase):
    """Network-fault test: inject a 3% packet-corruption fault on the
    primary node's network card, then verify that scale-in (dropping one
    standby), TPCC execution, data consistency checks and scale-out
    (restoring the dropped standby) all still succeed.
    """

    # Logical node names of the 1-primary / 2-standby cluster; the
    # RestartDbCluster decorator restarts these nodes before setUp runs.
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Build node/shell handles, locate the primary's network card,
        and snapshot synchronous_standby_names so tearDown can restore it.
        """
        self.log = Logger()
        self.log.info(f"-----{os.path.basename(__file__)} start-----")
        self.primary_root_node = Node(node='PrimaryRoot')
        self.primary_user_node = Node(node='PrimaryDbUser')
        self.sta1_dbuser = Node(node='Standby1DbUser')
        self.sta2_dbuser = Node(node='Standby2DbUser')
        self.com = Common()
        self.constant = Constant()
        self.primary_sh = CommonSH('PrimaryDbUser')
        self.priroot_sh = CommonSH('PrimaryRoot')
        self.sta2root_sh = CommonSH('Standby2Root')
        self.sta_1_sh = CommonSH('Standby1DbUser')
        self.sta_2_sh = CommonSH('Standby2DbUser')

        self.log.info('----获取网卡----')
        network_card_cmd = 'ifconfig'
        self.log.info(network_card_cmd)
        network_card_result = self.primary_root_node.sh(
            network_card_cmd).result()
        # ifconfig separates interfaces with a blank line; keep the name of
        # the entry whose text contains the primary's db_host address.
        network_card_list = network_card_result.split('\n\n')
        self.log.info(network_card_list)
        self.networkcard = ''
        for network_card_item in network_card_list:
            if self.primary_user_node.db_host in network_card_item:
                self.networkcard = network_card_item.split(':')[0].strip()
                self.log.info(self.networkcard)

        # Snapshot the replication GUC on every node for restoration.
        self.synchronous_standby_names_p = self._query_sync_standby_names(
            self.primary_sh)
        self.synchronous_standby_names_s1 = self._query_sync_standby_names(
            self.sta_1_sh)
        self.synchronous_standby_names_s2 = self._query_sync_standby_names(
            self.sta_2_sh)
        self.host_tuple = (self.primary_user_node.ssh_host,
                           self.sta1_dbuser.ssh_host,
                           self.sta2_dbuser.ssh_host)
        # Extra flags for gs_sshexkey when rebuilding root mutual trust.
        self.params = {'-f': 'test_hosts'}

    def test_main(self):
        text = '----step1: 主注入网络错包故障 expect:成功----'
        self.log.info(text)
        # Inject a packet-corruption fault on the primary's network card
        # at the (deliberately low, SSH-safe) configured error rate.
        inject_command = f"rNet_corrupt " \
            f"(dev, rate)values({self.networkcard}, " \
            f"{self.constant.network_error_rate})"
        result = self.com.cfe_inject(self.primary_root_node, inject_command)
        self.log.info(result)
        result = self.com.cfe_query(self.primary_root_node,
                                    f"rNet_corrupt where "
                                    f"dev={self.networkcard}")
        self.log.info(result)
        self.assertIn(f"corrupt {self.constant.network_error_rate}",
                      result, '执行失败:' + text)

        text = '----step2: 1主2备，减1备 expect:减容成功----'
        self.log.info(text)
        # gs_dropnode prompts interactively; drive it with expect.
        execute_cmd = f'''source {macro.DB_ENV_PATH};
                            expect <<EOF
                            set timeout 120
                            spawn gs_dropnode -U \
                            {self.primary_user_node.ssh_user} \
                            -G {self.primary_user_node.ssh_user} \
                            -h {self.sta1_dbuser.ssh_host}
                            expect "*drop the target node (yes/no)?*"
                            send "yes\\n"
                            expect eof\n''' + '''EOF'''
        self.log.info(execute_cmd)
        result = self.primary_user_node.sh(execute_cmd).result()
        self.log.info(result)
        self.assertIn("Success to drop the target nodes", result,
                      '执行失败:' + text)

        text = '----step3: 检查集群中节点个数 expect:成功----'
        self.log.info(text)
        # One wal sender left means exactly one standby remains connected.
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        self.assertEqual("1", result.splitlines()[-2].strip(),
                         '执行失败:' + text)

        text = '----step4: 执行TPCC，并检查数据一致性 expect:执行成功，主备一致----'
        self.log.info(text)
        result = self.com.startTPCC(self.primary_root_node, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.constant.TPCC_SUCCESS_MSG, result,
                      '执行失败:' + text)
        time.sleep(10)
        self._clear_network_fault()
        # Give the standby time to catch up before the consistency check
        # (see 2022/10/10 history note).
        time.sleep(100)
        query_msg = self.primary_sh.executeGsctl('query', '', get_detail=True)
        if "db_state                       : Normal" in query_msg:
            result = self.sta_2_sh.check_data_consistency()
            self.assertTrue(result, '执行失败:' + text)
            self._check_tables_consistent(text, self.primary_user_node,
                                          self.sta2_dbuser)

        text = '----step5: 将减容节点还原 expect:可能由于网络异常导致失败----'
        self.log.info(text)
        # Rebuild root mutual trust before expansion (2023/10/31 history).
        self.sta2root_sh.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,
                                          *self.host_tuple,
                                          **self.params)
        # Run expansion in a daemon thread with a 15-minute timeout: the
        # injected fault may make it hang.
        expension_thread = ComThread(self.priroot_sh.exec_expension,
                                     args=(self.primary_user_node.ssh_user,
                                           self.primary_user_node.ssh_user,
                                           self.sta1_dbuser.ssh_host,
                                           macro.DB_XML_PATH))
        expension_thread.daemon = True
        expension_thread.start()
        expension_thread.join(15 * 60)
        result_expension = expension_thread.get_result()
        if not result_expension:
            # Expansion failed (likely due to the network fault): clear the
            # fault, kill any stale build process, then retry once.
            self._clear_network_fault()
            time.sleep(5)
            self.com.kill_pid_keyword('build', '9', self.sta1_dbuser)
            time.sleep(5)
            # BUG FIX: the retry previously invoked the Node object itself
            # (self.primary_root_node(...)), which raises TypeError; it must
            # re-run the expansion via exec_expension as above.
            result_expension = self.priroot_sh.exec_expension(
                self.primary_user_node.ssh_user,
                self.primary_user_node.ssh_user,
                self.sta1_dbuser.ssh_host,
                macro.DB_XML_PATH)
        self.assertTrue(result_expension, '执行失败:' + text)

        text = '----step6: 检查集群中节点个数 expect:成功----'
        self.log.info(text)
        # Two wal senders means both standbys are connected again.
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        self.assertEqual("2", result.splitlines()[-2].strip(),
                         '执行失败:' + text)

        text = '----step7: 执行TPCC，并检查数据一致性 expect:执行成功，主备一致----'
        self.log.info(text)
        result = self.com.startTPCC(self.primary_root_node, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.constant.TPCC_SUCCESS_MSG, result, '执行失败:' + text)

        time.sleep(10)
        self._clear_network_fault()
        result = self.sta_1_sh.check_data_consistency()
        self.assertTrue(result, '执行失败:' + text)
        result = self.sta_2_sh.check_data_consistency()
        self.assertTrue(result, '执行失败:' + text)
        self._check_tables_consistent(text, self.primary_user_node,
                                      self.sta1_dbuser, self.sta2_dbuser)

    def tearDown(self):
        """Clear any remaining fault, re-expand the cluster if a node is
        still missing, and restore synchronous_standby_names everywhere."""
        self.log.info("-------------还原集群---------------")
        self.log.info("--------------主节点清理故障-------------")
        self._clear_network_fault()
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        if "2" != result.splitlines()[-2].strip():
            # Still only one standby: redo trust setup and expansion.
            self.sta2root_sh.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,
                                              *self.host_tuple,
                                              **self.params)
            self.priroot_sh.exec_expension(
                self.primary_user_node.ssh_user,
                self.primary_user_node.ssh_user,
                self.sta1_dbuser.ssh_host, macro.DB_XML_PATH)

        self.log.info("----------还原synchronous_standby_names-------------")
        self._restore_sync_standby_names(self.primary_sh,
                                         self.primary_root_node,
                                         self.synchronous_standby_names_p)
        self._restore_sync_standby_names(self.sta_1_sh,
                                         self.sta1_dbuser,
                                         self.synchronous_standby_names_s1)
        self._restore_sync_standby_names(self.sta_2_sh,
                                         self.sta2_dbuser,
                                         self.synchronous_standby_names_s2)
        self.log.info(f"-----{os.path.basename(__file__)} end-----")

    def _query_sync_standby_names(self, common_sh):
        """Return synchronous_standby_names on the node behind *common_sh*
        (the value sits on the second-to-last line of the SQL output)."""
        result = common_sh.executDbSql('show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        return result.strip().splitlines()[-2]

    def _clear_network_fault(self):
        """Remove the injected packet-corruption fault from the primary's
        network card, then query to log whatever is still registered."""
        result = self.com.cfe_clean(self.primary_root_node,
                                    f"rNet_corrupt where "
                                    f"dev={self.networkcard} and rate="
                                    f"{self.constant.network_error_rate}")
        self.log.info(result)
        result = self.com.cfe_query(self.primary_root_node,
                                    f"rNet_corrupt where "
                                    f"dev={self.networkcard}")
        self.log.info(result)

    def _check_tables_consistent(self, text, *nodes):
        """Assert the table list and every table's row count match across
        *nodes*; *text* is the step label used in failure messages."""
        flag = self.com.check_data_sample_by_all(r'\d', *nodes)
        self.assertTrue(flag, '执行失败:' + text)
        table_dict = self.com.format_sql_result(
            self.primary_sh.executDbSql(r'\d'))
        for name in table_dict.get('Name'):
            select_sql = f'select count(*) from {name};'
            flag = self.com.check_data_sample_by_all(select_sql, *nodes)
            self.assertTrue(flag, '执行失败:' + text)

    def _restore_sync_standby_names(self, common_sh, node, value):
        """Reload synchronous_standby_names=*value* on the host of *node*
        via gs_guc driven through *common_sh*."""
        hostname = node.sh("hostname").result()
        self.log.info(hostname)
        common_sh.executeGsguc('reload',
                               self.constant.GSGUC_SUCCESS_MSG,
                               f"synchronous_standby_names='{value}'",
                               node_name=hostname)
