"""
Case Type   : 硬件故障类--network
Case Name   : 数据库减容/扩容时，主节点网络链路闪断
Create At   : 2021/09/24
Owner       : 5328126
Description :
    1.1主2备，减1备
    2.减容的同时注入网络链路闪断
    3.检查集群中节点个数
    4.执行TPCC，并检查数据一致性
    5.将减容节点还原
    6.扩容的同时注入网络链路闪断
    7.再次扩容
    8.检查集群中节点个数
    9.执行TPCC，并检查数据一致性
Expect      :
    1.成功
    2.成功
    3.节点数变为2
    4.执行成功，主备一致
    5.结果不确定
    6.成功
    7.成功
    8.节点数为3
    9.成功，数据一致
History     :
    Modified by opentestcase026 2022/2/9:再次扩容前未等待故障消除
    Modified by wx1115623 2022/6/8:添加判断条件，注入网络链路闪断故障时减容失败后再次执行减容
    Modified by wx1115623 2022/10/10:执行TPCC时增加等待时间，等故障清除
    Modified by 1205145 2023/10/31:扩容前重建root互信
"""

import unittest
import time
import os
from yat.test import Node, macro
from testcase.utils.Common import Common
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
from testcase.utils.ComThread import ComThread
from testcase.utils.CommonSH import RestartDbCluster
from testcase.utils.Logger import Logger


class CPUOverloadAndRunTPCC(unittest.TestCase):
    """Network link flash while dropping / re-expanding a cluster node.

    Scenario (1 primary + 2 standbys):
      * drop one standby while injecting a network link-flash fault on
        the primary NIC, verify the cluster shrinks to 2 nodes and TPCC
        data stays consistent;
      * expand the dropped node back under the same fault (retrying once
        if the concurrent expansion failed) and verify the restored
        3-node cluster is consistent again.

    NOTE(review): the class name mentions CPU overload but the case is a
    network-fault case; the name is kept to avoid breaking discovery.
    """

    # Node aliases restarted by the RestartDbCluster decorator before setUp.
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Prepare node handles, locate the primary NIC, and save the
        original synchronous_standby_names of all three nodes so that
        tearDown can restore them."""
        self.log = Logger()
        self.log.info(f"-----{os.path.basename(__file__)} start-----")
        self.primary_root_node = Node(node='PrimaryRoot')
        self.primary_user_node = Node(node='PrimaryDbUser')
        self.sta1_dbuser = Node(node='Standby1DbUser')
        self.sta2_dbuser = Node(node='Standby2DbUser')
        self.com = Common()
        self.constant = Constant()
        self.primary_sh = CommonSH('PrimaryDbUser')
        self.priroot_sh = CommonSH('PrimaryRoot')
        self.sta2root_sh = CommonSH('Standby2Root')
        self.sta_1_sh = CommonSH('Standby1DbUser')
        self.sta_2_sh = CommonSH('Standby2DbUser')

        self.log.info('----获取网卡----')
        # Find the NIC that carries the primary's db_host address; the
        # link-flash fault injection command needs its device name.
        network_card_cmd = 'ifconfig'
        self.log.info(network_card_cmd)
        network_card_result = self.primary_root_node.sh(
            network_card_cmd).result()
        # ifconfig separates interfaces with a blank line.
        network_card_list = network_card_result.split('\n\n')
        self.log.info(network_card_list)
        self.networkcard = ''
        for network_card_item in network_card_list:
            if self.primary_user_node.db_host in network_card_item:
                self.networkcard = network_card_item.split(':')[0].strip()
                self.log.info(self.networkcard)

        # Save synchronous_standby_names from every node (second-to-last
        # output line holds the value); tearDown reloads these.
        result = self.primary_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        self.synchronous_standby_names_p = result.strip().splitlines()[-2]
        result = self.sta_1_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        self.synchronous_standby_names_s1 = result.strip().splitlines()[-2]
        result = self.sta_2_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        self.synchronous_standby_names_s2 = result.strip().splitlines()[-2]
        self.host_tuple = (self.primary_user_node.ssh_host,
                           self.sta1_dbuser.ssh_host, self.sta2_dbuser.ssh_host)
        # Extra options for gs_sshexkey (host list file).
        self.params = {'-f': 'test_hosts'}

    def test_main(self):
        """Run the drop-node and expand-node scenarios under a network
        link-flash fault and verify cluster size and data consistency."""
        drop_text = '----step1: 1主2备，减1备 expect:成功----'
        self.log.info(drop_text)
        # Drop standby1 in a background thread so the fault can be
        # injected concurrently.
        drop_thread = ComThread(self.primary_sh.exec_dropnode,
                                args=(self.sta1_dbuser,))
        drop_thread.daemon = True
        drop_thread.start()

        cfe_text = '----step2: 减容的同时注入网络链路闪断 expect:成功----'
        self.log.info(cfe_text)
        # Give the drop a head start before injecting the fault.
        time.sleep(1)
        inject_command = f" rNetLink_flash (repeat,duration,interval,dev) " \
            f"values (50,0.5,5,{self.networkcard})"
        cfe_thread = ComThread(
            self.com.cfe_inject, args=(self.primary_root_node,
                                       inject_command,))
        cfe_thread.daemon = True
        cfe_thread.start()

        self.log.info("-------------------获取故障结果---------------------")
        cfe_thread.join(400)
        result = cfe_thread.get_result()
        self.log.info(result)
        # 'Determining IP information' also indicates the link flapped.
        flag = self.constant.cfe_inject_netflash_success_msg \
            in result or \
            'Determining IP information' in result
        self.assertTrue(flag, '执行失败' + cfe_text)

        self.log.info("-----------获取减容结果-------------------")
        drop_thread.join(1800)
        result = drop_thread.get_result()
        self.log.info(result)
        if self.constant.drop_node_success_msg not in result:
            # The fault may have disturbed the first attempt: wait for
            # the fault to clear, then re-check the drop thread result.
            time.sleep(300)
            drop_thread.join(1800)
            result = drop_thread.get_result()
            self.log.info(result)
        self.assertIn(self.constant.drop_node_success_msg, result,
                      '执行失败:' + drop_text)

        text = '----step3: 检查集群中节点个数 expect:节点数变为2----'
        self.log.info(text)
        # One remaining wal sender means primary + 1 standby = 2 nodes.
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        self.assertEqual("1", result.splitlines()[-2].strip(), '执行失败:' + text)
        p_hostname = self.primary_root_node.sh("hostname").result()
        self.log.info(p_hostname)

        text = '----step4: 执行TPCC，并检查数据一致性 expect:执行成功，主备一致----'
        self.log.info(text)
        self.log.info("-----等待故障结束-----")
        time.sleep(300)
        result = self.com.startTPCC(self.primary_root_node, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.constant.TPCC_SUCCESS_MSG, result, '执行失败:' + text)
        result = self.sta_2_sh.check_data_consistency()
        self.assertTrue(result, '执行失败:' + text)
        # Compare table list and per-table row counts between the
        # surviving nodes.
        nodes_tuple = (self.primary_user_node, self.sta2_dbuser)
        flag = self.com.check_data_sample_by_all(r'\d', *nodes_tuple)
        self.assertTrue(flag, '执行失败:' + text)
        table_dict = self.com.format_sql_result(
            self.primary_sh.executDbSql(r'\d'))
        table_name = table_dict.get('Name')
        for name in table_name:
            select_sql = f'select count(*) from {name};'
            flag = self.com.check_data_sample_by_all(select_sql,
                                                     *nodes_tuple)
            self.assertTrue(flag, '执行失败:' + text)

        exp_text = '----step5: 将减容节点还原 expect:失败----'
        self.log.info(exp_text)
        # Rebuild root SSH trust before expansion, then expand in a
        # background thread so the fault can be injected concurrently.
        self.sta2root_sh.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,
                                          *self.host_tuple,
                                          **self.params)
        exp_thread = ComThread(self.priroot_sh.exec_expension,
                               args=(self.primary_user_node.ssh_user,
                                     self.primary_user_node.ssh_user,
                                     self.sta1_dbuser.ssh_host,
                                     macro.DB_XML_PATH, ))
        exp_thread.daemon = True
        exp_thread.start()

        cfe_text = '----step6: 扩容的同时注入网络链路闪断 expect:成功----'
        self.log.info(cfe_text)
        time.sleep(1)
        inject_command = f" rNetLink_flash (repeat,duration,interval,dev) " \
            f"values (50,0.5,5,{self.networkcard})"
        cfe_thread = ComThread(
            self.com.cfe_inject, args=(self.primary_root_node,
                                       inject_command,))
        cfe_thread.daemon = True
        cfe_thread.start()

        self.log.info("-------------------获取故障结果---------------------")
        cfe_thread.join(400)
        result = cfe_thread.get_result()
        self.log.info(result)
        self.assertIn(self.constant.cfe_inject_netflash_success_msg, result,
                      '执行失败:' + cfe_text)

        self.log.info("-------------------获取扩容结果---------------------")
        exp_thread.join(1800)
        exp_thread_result = exp_thread.get_result()
        self.log.info(exp_thread_result)

        exp_text = '----step7: 再次扩容 expect:成功----'
        # Wait for the injected fault (50 repeats * ~6s) to fully clear
        # before retrying the expansion.
        time.sleep(50*6)
        if not exp_thread_result:
            # The concurrent expansion failed under the fault; retry it
            # once with fresh root SSH trust.
            self.log.info(exp_text)
            self.sta2root_sh.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,
                                              *self.host_tuple,
                                              **self.params)
            result = self.priroot_sh.exec_expension(
                self.primary_user_node.ssh_user,
                self.primary_user_node.ssh_user,
                self.sta1_dbuser.ssh_host, macro.DB_XML_PATH)
            self.assertTrue(result, '执行失败:' + exp_text)

        text = '----step8: 检查集群中节点个数 expect:节点数变为3----'
        self.log.info(text)
        # Two wal senders means primary + 2 standbys = 3 nodes.
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        self.assertEqual("2", result.splitlines()[-2].strip(), '执行失败:' + text)

        text = '----step9: 执行TPCC，并检查数据一致性 expect:执行成功，主备一致----'
        self.log.info(text)
        result = self.com.startTPCC(self.primary_root_node, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.constant.TPCC_SUCCESS_MSG, result,
                      '执行失败:' + text)
        time.sleep(10)
        result = self.sta_1_sh.check_data_consistency()
        self.assertTrue(result, '执行失败:' + text)
        result = self.sta_2_sh.check_data_consistency()
        self.assertTrue(result, '执行失败:' + text)
        # All three nodes must agree on table list and row counts.
        nodes_tuple = (self.primary_user_node,
                       self.sta1_dbuser, self.sta2_dbuser)
        flag = self.com.check_data_sample_by_all(r'\d', *nodes_tuple)
        self.assertTrue(flag, '执行失败:' + text)
        table_dict = self.com.format_sql_result(
            self.primary_sh.executDbSql(r'\d'))
        table_name = table_dict.get('Name')
        for name in table_name:
            select_sql = f'select count(*) from {name};'
            flag = self.com.check_data_sample_by_all(select_sql,
                                                     *nodes_tuple)
            self.assertTrue(flag, '执行失败:' + text)

    def tearDown(self):
        """Restore the 3-node cluster (re-expanding if necessary) and
        reload the saved synchronous_standby_names on every node."""
        # Let any remaining fault/expansion activity settle first.
        time.sleep(300)
        self.log.info("-------------还原集群---------------")
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        if "2" != result.splitlines()[-2].strip():
            # Cluster is not back to 3 nodes: rebuild trust and expand.
            self.sta2root_sh.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,
                                              *self.host_tuple,
                                              **self.params)
            self.priroot_sh.exec_expension(
                self.primary_user_node.ssh_user,
                self.primary_user_node.ssh_user,
                self.sta1_dbuser.ssh_host, macro.DB_XML_PATH)

        self.log.info("----------还原synchronous_standby_names-------------")
        p_hostname = self.primary_root_node.sh("hostname").result()
        self.log.info(p_hostname)
        self.primary_sh.executeGsguc('reload',
                                     self.constant.GSGUC_SUCCESS_MSG,
                                     f"synchronous_standby_names="
                                     f"'{self.synchronous_standby_names_p}'",
                                     node_name=p_hostname)
        s_hostname = self.sta1_dbuser.sh("hostname").result()
        self.log.info(s_hostname)
        self.sta_1_sh.executeGsguc('reload',
                                   self.constant.GSGUC_SUCCESS_MSG,
                                   f"synchronous_standby_names="
                                   f"'{self.synchronous_standby_names_s1}'",
                                   node_name=s_hostname)
        s_hostname = self.sta2_dbuser.sh("hostname").result()
        self.log.info(s_hostname)
        self.sta_2_sh.executeGsguc('reload',
                                   self.constant.GSGUC_SUCCESS_MSG,
                                   f"synchronous_standby_names="
                                   f"'{self.synchronous_standby_names_s2}'",
                                   node_name=s_hostname)
        self.log.info(f"-----{os.path.basename(__file__)} end-----")
