"""
Case Type   : 硬件故障类--disk
Case Name   : 数据库减容时主节点IOwait高
Create At   : 2021/07/15
Owner       : @zou_jialiang0505328126
Description :
    1.主节点注入ioWait高故障
    2.1主2备，减1备
    3.清理故障
    4.若2失败则再次减容
    5.检查减容结果
    6.执行TPCC，并检查数据一致性
Expect      :
    1.成功
    2.结果不一定，iowait超过一定值时可能导致减容ssh阻塞失败，该结果受执行环境性能影响。
    结果1：成功
    结果2：失败，打印不完全，但是主节点查询可能显示减容成功，ssh文件传输失败，此时在其他节点查询异常
    3.成功
    4.成功
    5.节点数变为2
    6.执行成功，主备一致
History     :
    Modified by @wan005 2021/8/3:扩容后需手动修改synchronous_standby_names
    Modified 2021/9/1 @wan005:适配最新CFE故障打印,并修改用例，减容失败后清理故障再次减容
    Modified by wx1115623 2023/2/16:扩容前重建root互信
"""

import unittest
import time
import os
from yat.test import Node, macro
from testcase.utils.Common import Common
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
from testcase.utils.CommonSH import RestartDbCluster
from testcase.utils.Logger import Logger


class DiskIOwait(unittest.TestCase):
    """Disk-fault case: drop one standby while the primary has high IOwait.

    Flow:
        1. Inject a high-IOwait fault on the primary node.
        2. With 1 primary + 2 standbys, try to drop one standby
           (may fail while the fault is active -- expected either way).
        3. Clear the fault.
        4. If step 2 failed, retry the drop.
        5. Verify the cluster shrank to 2 nodes.
        6. Run TPCC and check primary/standby data consistency.
    tearDown restores the 3-node cluster and synchronous_standby_names.
    """

    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Collect node handles, the primary data mount point, and the
        current synchronous_standby_names of every node (restored later)."""
        self.log = Logger()
        self.log.info(f"-----{os.path.basename(__file__)} start-----")
        self.primary_root_node = Node(node='PrimaryRoot')
        self.primary_user_node = Node(node='PrimaryDbUser')
        self.sta1_dbuser = Node(node='Standby1DbUser')
        self.sta2_dbuser = Node(node='Standby2DbUser')
        self.com = Common()
        self.constant = Constant()
        self.primary_sh = CommonSH('PrimaryDbUser')
        self.priroot_sh = CommonSH('PrimaryRoot')
        self.sta_1_sh = CommonSH('Standby1DbUser')
        self.sta_2_sh = CommonSH('Standby2DbUser')
        self.sta2root_sh = CommonSH('Standby2Root')
        self.host_tuple = (self.primary_user_node.ssh_host,
                           self.sta1_dbuser.ssh_host,
                           self.sta2_dbuser.ssh_host)
        self.params = {'-f': 'test_hosts'}

        # Mount point of the DB instance path; the IOwait fault is injected
        # against this directory.
        cmd = f"df -h {macro.DB_INSTANCE_PATH}"
        self.log.info(cmd)
        msg = self.primary_root_node.sh(cmd).result()
        self.log.info(msg)
        self.dir = msg.splitlines()[-1].split()[-1].strip()

        # Remember synchronous_standby_names per node so tearDown can
        # restore them after re-expanding the cluster.
        # splitlines()[-2] extracts the value row from gsql's tabular output.
        result = self.primary_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        self.synchronous_standby_names_p = result.strip().splitlines()[-2]
        result = self.sta_1_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        self.synchronous_standby_names_s1 = result.strip().splitlines()[-2]
        result = self.sta_2_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        self.synchronous_standby_names_s2 = result.strip().splitlines()[-2]

    def _dropnode_cmd(self):
        """Build the interactive gs_dropnode command that removes standby1.

        Returns the shell text: source the DB env, then drive gs_dropnode
        through `expect`, answering "yes" at the confirmation prompt.

        NOTE: the expect prompt pattern must stay on a single line -- a
        newline (plus indentation) inside the quoted pattern would never
        match gs_dropnode's one-line prompt, so the retry would hang until
        the 500 s timeout (this was the bug in the old step-4 command).
        """
        return f'''source {macro.DB_ENV_PATH};
                            expect <<EOF
                            set timeout 500
                            spawn gs_dropnode -U \
                            {self.primary_user_node.ssh_user} \
                            -G {self.primary_user_node.ssh_user} \
                            -h {self.sta1_dbuser.ssh_host}
                            expect "*drop the target node (yes/no)?*"
                            send "yes\\n"
                            expect eof\n''' + '''EOF'''

    def test_main(self):
        text = '----step1:主节点注入ioWait高故障 expect:成功----'
        self.log.info(text)
        # Inject a write-IOwait fault (100k block size, 10000 blocks,
        # 1500 s duration) on the primary's data mount point via CFE.
        inject_command = f"rIO_iowait (type,dir,bs,count,time) " \
            f"values (w_iowait,{self.dir}, 100k,10000,1500)"
        result = self.com.cfe_inject(self.primary_root_node, inject_command)
        self.log.info(result)
        result = self.com.cfe_query(self.primary_root_node, "rIO_iowait")
        self.log.info(result)
        self.assertIn('100k 10000', result, '执行失败:' + text)

        text2 = '----step2: 1主2备，减1备 expect:成功----'
        self.log.info(text2)
        execute_cmd = self._dropnode_cmd()
        self.log.info(execute_cmd)
        result = self.primary_user_node.sh(execute_cmd).result()
        self.log.info(result)
        # Under heavy IOwait the drop may fail (ssh transfer blocked) or
        # partially succeed; treat it as successful if either the success
        # message appears or the standby vanished from the cluster status.
        drop_success = False
        status = self.primary_sh.getDbClusterStatus('detail')
        if self.constant.drop_node_success_msg in result or \
                self.sta1_dbuser.ssh_host not in status:
            drop_success = True

        text = '----step3: 清除故障 expect:成功----'
        self.log.info(text)
        # Clean up to three times; the fault must be gone before retrying.
        for _ in range(3):
            result = self.com.cfe_clean(self.primary_root_node, "rIO_iowait")
            self.log.info(result)
            result = self.com.cfe_query(self.primary_root_node, "rIO_iowait")
            self.log.info(result)
        self.assertNotIn('100k 10000', result, '执行失败:' + text)

        if not drop_success:
            text = '----step4: 若2失败则再次减容 expect:成功----'
            self.log.info(text)
            # Reuse the same (correct) command as step 2; with the fault
            # cleared this retry is expected to succeed.
            execute_cmd = self._dropnode_cmd()
            self.log.info(execute_cmd)
            result = self.primary_user_node.sh(execute_cmd).result()
            self.log.info(result)
            self.assertIn(self.constant.drop_node_success_msg, result,
                          '执行失败:' + text)

        text = '----step5:检查减容结果 expect:成功----'
        self.log.info(text)
        # One remaining standby -> exactly one WAL sender on the primary.
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        self.assertEqual("1", result.splitlines()[-2].strip(),
                         '执行失败:' + text)

        text = '----step6: 执行TPCC，并检查数据一致性 expect:执行成功，主备一致----'
        self.log.info(text)
        result = self.com.startTPCC(self.primary_root_node, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.constant.TPCC_SUCCESS_MSG, result,  '执行失败:' + text)
        # Give replication a moment to catch up before comparing.
        time.sleep(10)
        result = self.sta_2_sh.check_data_consistency()
        self.assertTrue(result,  '执行失败:' + text)
        nodes_tuple = (self.primary_user_node, self.sta2_dbuser)
        # Compare the table list, then row counts table-by-table, between
        # the primary and the surviving standby.
        flag = self.com.check_data_sample_by_all(r'\d', *nodes_tuple)
        self.assertTrue(flag,  '执行失败:' + text)

        table_dict = self.com.format_sql_result(
            self.primary_sh.executDbSql(r'\d'))
        table_name = table_dict.get('Name')
        for name in table_name:
            select_sql = f'select count(*) from {name};'
            flag = self.com.check_data_sample_by_all(select_sql,
                                                     *nodes_tuple)
            self.assertTrue(flag,  '执行失败:' + text)

    def tearDown(self):
        """Clear any leftover fault, re-add the dropped standby if needed,
        and restore each node's synchronous_standby_names."""
        self.log.info("--------------主节点清理故障-------------")
        result = self.com.cfe_clean(self.primary_root_node, "rIO_iowait")
        self.log.info(result)
        result = self.com.cfe_query(self.primary_root_node, "rIO_iowait")
        self.log.info(result)
        self.log.info("-------------还原集群---------------")
        # Rebuild root SSH mutual trust before expansion (2023/2/16 fix).
        result = self.sta2root_sh.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,
                                                   *self.host_tuple,
                                                   **self.params)
        self.log.info(result)
        # Expand back to 2 standbys only if the WAL-sender count shows
        # the cluster is still shrunk.
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        if "2" != result.splitlines()[-2].strip():
            self.priroot_sh.exec_expension(
                self.primary_user_node.ssh_user,
                self.primary_user_node.ssh_user,
                self.sta1_dbuser.ssh_host, macro.DB_XML_PATH)
        self.log.info("----------还原synchronous_standby_names-------------")
        # gs_expansion does not restore this GUC (2021/8/3 fix): reload the
        # value saved in setUp on every node.
        p_hostname = self.primary_root_node.sh("hostname").result()
        self.log.info(p_hostname)
        self.primary_sh.executeGsguc('reload',
                                     self.constant.GSGUC_SUCCESS_MSG,
                                     f"synchronous_standby_names="
                                     f"'{self.synchronous_standby_names_p}'",
                                     node_name=p_hostname)
        s_hostname = self.sta1_dbuser.sh("hostname").result()
        self.log.info(s_hostname)
        self.sta_1_sh.executeGsguc('reload',
                                   self.constant.GSGUC_SUCCESS_MSG,
                                   f"synchronous_standby_names="
                                   f"'{self.synchronous_standby_names_s1}'",
                                   node_name=s_hostname)
        s_hostname = self.sta2_dbuser.sh("hostname").result()
        self.log.info(s_hostname)
        self.sta_2_sh.executeGsguc('reload',
                                   self.constant.GSGUC_SUCCESS_MSG,
                                   f"synchronous_standby_names="
                                   f"'{self.synchronous_standby_names_s2}'",
                                   node_name=s_hostname)
        self.log.info(f"-----{os.path.basename(__file__)} end-----")
