"""
Case Type   : 硬件故障类--disk
Case Name   : 数据库扩容时主节点IOwait高
Create At   : 2021/07/15
Owner       : 5328126
Description :
    1.主节点注入ioWait高故障
    2.扩容
    3.检查集群中节点个数
    4.清除故障
    5.执行TPCC，并检查数据一致性
Expect      :
    1.成功
    2.成功
    3.节点数增1
    4.成功
    5.执行成功，主备一致
History     :
    Modified by 5328126 2021/8/3:扩容后需手动修改synchronous_standby_names
    Modified 2021/9/1 5328126:适配最新CFE故障打印
    Modified by 1115623 2023/2/16:扩容前重建root互信
"""

import unittest
import time
import os
from yat.test import Node, macro
from testcase.utils.Common import Common
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
from testcase.utils.CommonSH import RestartDbCluster
from testcase.utils.Logger import Logger


class DiskIOwait(unittest.TestCase):
    """Disk fault case: with high IOwait injected on the primary node,
    expand the cluster back to full size, verify the node count, clear
    the fault, then run TPCC and check primary/standby data consistency.
    """

    # Logical node names; the RestartDbCluster decorator restores these
    # nodes to a healthy cluster state before setUp executes.
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Record cluster state and shrink the cluster by dropping
        standby 1, so that test_main can exercise the expansion path."""
        self.log = Logger()
        self.log.info(f"-----{os.path.basename(__file__)} start-----")
        # Root / db-user SSH connections to the cluster hosts.
        self.primary_root_node = Node(node='PrimaryRoot')
        self.primary_user_node = Node(node='PrimaryDbUser')
        self.sta1_dbuser = Node(node='Standby1DbUser')
        self.sta2_dbuser = Node(node='Standby2DbUser')
        self.com = Common()
        self.constant = Constant()
        # Shell helpers bound to each node for gs_* tooling and SQL.
        self.primary_sh = CommonSH('PrimaryDbUser')
        self.priroot_sh = CommonSH('PrimaryRoot')
        self.sta_1_sh = CommonSH('Standby1DbUser')
        self.sta_2_sh = CommonSH('Standby2DbUser')
        self.sta2root_sh = CommonSH('Standby2Root')
        self.host_tuple = (self.primary_user_node.ssh_host,
                           self.sta1_dbuser.ssh_host,
                           self.sta2_dbuser.ssh_host)
        # Extra argument forwarded to gs_sshexkey (hosts file name).
        self.params = {'-f': 'test_hosts'}

        # Resolve the mount point hosting the DB instance; the IOwait
        # fault injected in test_main targets this directory.
        cmd = f"df -h {macro.DB_INSTANCE_PATH}"
        self.log.info(cmd)
        msg = self.primary_root_node.sh(cmd).result()
        self.log.info(msg)
        # Last field of the last line of df output is the mount point.
        self.dir = msg.splitlines()[-1].split()[-1].strip()

        # Snapshot synchronous_standby_names on every node so tearDown
        # can restore them after the drop/expand cycle; per the 2021/8/3
        # history note, expansion does not restore this setting itself.
        # The value sits on the second-to-last line of the SQL output.
        result = self.primary_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        self.synchronous_standby_names_p = result.strip().splitlines()[-2]
        result = self.sta_1_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        self.synchronous_standby_names_s1 = result.strip().splitlines()[-2]
        result = self.sta_2_sh.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(f"synchronous_standby_names is {result}")
        self.synchronous_standby_names_s2 = result.strip().splitlines()[-2]

        # Shrink the cluster: drop standby 1 with gs_dropnode, answering
        # its interactive (yes/no) confirmation via an expect heredoc.
        self.log.info("-------------2.减容-----------------")
        execute_cmd = f'''source {macro.DB_ENV_PATH};
                                    expect <<EOF
                                    set timeout 120
                                    spawn gs_dropnode -U \
                                    {self.primary_user_node.ssh_user} \
                                    -G {self.primary_user_node.ssh_user} \
                                    -h {self.sta1_dbuser.ssh_host}
                                    expect "*drop the target node (yes/no)?*"
                                    send "yes\\n"
                                    expect eof\n''' + '''EOF'''
        self.log.info(execute_cmd)
        result = self.primary_user_node.sh(execute_cmd).result()
        self.log.info(result)
        self.assertIn("Success to drop the target nodes", result)

    def test_main(self):
        """Inject IOwait fault, expand, verify node count, clean the
        fault, then run TPCC and check data consistency."""
        text = '----step1: 主节点磁盘IOwait高 expect:成功----'
        self.log.info(text)
        # CFE fault spec: write-IOwait on the DB mount point
        # (bs=10000, count=10000, duration=1500).
        inject_command = f"rIO_iowait (type,dir,bs,count,time) " \
            f"values (w_iowait,{self.dir}, 10000,10000,1500)"
        result = self.com.cfe_inject(self.primary_root_node, inject_command)
        self.log.info(result)
        result = self.com.cfe_query(self.primary_root_node, "rIO_iowait")
        self.log.info(result)
        self.assertIn(self.constant.cfe_iowait_success_msg, result,
                      '执行失败:' + text)

        text = '----step2: 扩容 expect:成功----'
        self.log.info(text)
        # Rebuild root SSH mutual trust before expansion (added per the
        # 2023/2/16 history note).
        result = self.sta2root_sh.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,
                                                   *self.host_tuple,
                                                   **self.params)
        self.log.info(result)
        result = self.priroot_sh.exec_expension(
            self.primary_user_node.ssh_user, self.primary_user_node.ssh_user,
            self.sta1_dbuser.ssh_host, macro.DB_XML_PATH)
        if not result:
            # Expansion helper reported failure; fall back to the overall
            # cluster status before failing the step.
            status = self.primary_sh.getDbClusterStatus("status")
            self.log.info(status)
            self.assertTrue(status, '执行失败:' + text)
        else:
            self.assertTrue(result, '执行失败:' + text)

        text = '----step3: 检查集群中节点个数 expect:成功----'
        self.log.info(text)
        # Two WAL senders on the primary are expected once both standbys
        # are attached again after the expansion.
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        self.assertEqual("2", result.splitlines()[-2].strip(), '执行失败:' + text)

        text = '----step4: 清除故障 expect:成功----'
        self.log.info(text)
        result = self.com.cfe_clean(self.primary_root_node, "rIO_iowait")
        self.log.info(result)
        result = self.com.cfe_query(self.primary_root_node, "rIO_iowait")
        self.log.info(result)
        # The fault entry must no longer appear in the CFE query output.
        self.assertNotIn("w_iowait", result, '执行失败:' + text)

        text = '----step5: 执行TPCC，并检查数据一致性 expect:执行成功，主备一致----'
        self.log.info(text)
        result = self.com.startTPCC(self.primary_root_node, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.constant.TPCC_SUCCESS_MSG, result, '执行失败:' + text)
        # Give replication a moment to catch up before consistency checks.
        time.sleep(10)
        result = self.sta_1_sh.check_data_consistency()
        self.assertTrue(result, '执行失败:' + text)
        result = self.sta_2_sh.check_data_consistency()
        self.assertTrue(result, '执行失败:' + text)
        nodes_tuple = (self.primary_user_node,
                       self.sta1_dbuser, self.sta2_dbuser)
        # Compare the table listing (\d) across all three nodes.
        flag = self.com.check_data_sample_by_all(r'\d', *nodes_tuple)
        self.assertTrue(flag, '执行失败:' + text)

        # Compare per-table row counts across all three nodes.
        table_dict = self.com.format_sql_result(
            self.primary_sh.executDbSql(r'\d'))
        table_name = table_dict.get('Name')
        for name in table_name:
            select_sql = f'select count(*) from {name};'
            flag = self.com.check_data_sample_by_all(select_sql,
                                                     *nodes_tuple)
            self.assertTrue(flag, '执行失败:' + text)

    def tearDown(self):
        """Clean up: clear the injected fault, re-expand the cluster if
        test_main left it short a node, and restore each node's
        synchronous_standby_names captured in setUp."""
        self.log.info("--------------主节点清理故障-------------")
        result = self.com.cfe_clean(self.primary_root_node, "rIO_iowait")
        self.log.info(result)
        result = self.com.cfe_query(self.primary_root_node, "rIO_iowait")
        self.log.info(result)
        self.log.info("-------------还原集群---------------")
        sql = "select count(*) from pg_stat_get_wal_senders();"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        # Fewer than two WAL senders means the expansion in test_main did
        # not complete; redo trust setup and expansion to restore the
        # full cluster.
        if "2" != result.splitlines()[-2].strip():
            self.sta2root_sh.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,
                                              *self.host_tuple,
                                              **self.params)
            self.priroot_sh.exec_expension(
                self.primary_user_node.ssh_user,
                self.primary_user_node.ssh_user,
                self.sta1_dbuser.ssh_host, macro.DB_XML_PATH)
        self.log.info("----------还原synchronous_standby_names-------------")
        # Reload the synchronous_standby_names values saved in setUp on
        # each node via gs_guc (manual restore required after expansion
        # per the 2021/8/3 history note).
        p_hostname = self.primary_root_node.sh("hostname").result()
        self.log.info(p_hostname)
        self.primary_sh.executeGsguc('reload',
                                     self.constant.GSGUC_SUCCESS_MSG,
                                     f"synchronous_standby_names="
                                     f"'{self.synchronous_standby_names_p}'",
                                     node_name=p_hostname)
        s_hostname = self.sta1_dbuser.sh("hostname").result()
        self.log.info(s_hostname)
        self.sta_1_sh.executeGsguc('reload',
                                   self.constant.GSGUC_SUCCESS_MSG,
                                   f"synchronous_standby_names="
                                   f"'{self.synchronous_standby_names_s1}'",
                                   node_name=s_hostname)
        s_hostname = self.sta2_dbuser.sh("hostname").result()
        self.log.info(s_hostname)
        self.sta_2_sh.executeGsguc('reload',
                                   self.constant.GSGUC_SUCCESS_MSG,
                                   f"synchronous_standby_names="
                                   f"'{self.synchronous_standby_names_s2}'",
                                   node_name=s_hostname)
        self.log.info(f"-----{os.path.basename(__file__)} end-----")
