"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.

openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:

          http://license.coscl.org.cn/MulanPSL2

THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : 数据库系统
Case Name   : 双机场景，主机磁盘占满，备机做重建和平滑切换
Create At   : 2020/07/25
@zou_jialiang0505328126
Description :
    1、主机磁盘不足场景模拟（可以使用cfe故障注入方式）
    2、备机进行平滑切换
    3、恢复主机磁盘空间
    4、重启集群
    5、备机再次进行切换
Expect      :
    1、故障注入成功
    2、切换失败
    3、执行成功
    4、执行成功
    5、执行成功
History     :
    created： 2020/07/25  created by @wan005
    modified： 2021/06/03 n@ningyali 主机注入故障，teardown中清除故障使用的是备节点，需修改
    modified： 2021/06/03 @wan005 修改故障注入地址获取变量
"""
import unittest

from yat.test import Node
from yat.test import macro

from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH, RestartDbCluster
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger

# Module-level singletons shared by the test case below.
logger = Logger()
common = Common()
# Shell helpers bound to each cluster node declared in the yat config;
# commonshpri drives cluster stop/start from the primary node.
commonshpri = CommonSH('PrimaryDbUser')
commonshsta1 = CommonSH('Standby1DbUser')
commonshsta2 = CommonSH('Standby2DbUser')


class DiskFull(unittest.TestCase):
    """
    Dual-node scenario: fill the primary's disk (cfe fault injection),
    verify a standby switchover fails, free the disk, restart the cluster,
    and verify the switchover then succeeds.
    """
    dbPrimaryUserNode = Node(node='PrimaryDbUser')
    dbPrimaryRootNode = Node(node='PrimaryRoot')
    dbStandby1UserNode = Node(node='Standby1DbUser')
    # Device backing the primary's data directory; filled in by
    # test_disk_full and read again during fault cleanup. Empty string
    # means no fault was recorded yet.
    DB_DISK_NAME = ''
    SWITCHOVER_FAIL_MSG = Constant.SWITCHOVER_FAIL_MSG
    REFRESHCONF_SUCCESS_MSG = Constant.REFRESHCONF_SUCCESS_MSG
    SWITCH_SUCCESS_MSG = Constant.SWITCH_SUCCESS_MSG
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        logger.info("----this is setup----")
        logger.info('----Opengauss_Reliability_Dbsys_Case024.py start----')

    def _clean_disk_fault(self):
        """Remove the cfe disk-full fault on the primary, if one was injected.

        Guarded so a test failure before disk detection does not send a
        malformed 'clean rfile_full where(diskname=)' command.
        """
        if not self.DB_DISK_NAME:
            logger.info('----no disk fault recorded, skip cfe clean----')
            return
        clean_cmd = f'{macro.CFE_PATH}/cfe "clean rfile_full where' \
                    f'(diskname={self.DB_DISK_NAME})"'
        tmp = self.dbPrimaryRootNode.sh(clean_cmd).result()
        logger.info(tmp)

    def test_disk_full(self):
        """Disk-full fault on primary blocks switchover until cleaned."""
        # Step 0: find the device backing the primary's data directory;
        # the last df line holds the mount entry, first column is the device.
        logger.info("----get pri disk name----")
        cmd = f"df -h {macro.DB_INSTANCE_PATH}"
        logger.info(cmd)
        msg = self.dbPrimaryRootNode.sh(cmd).result()
        logger.info(msg)
        self.DB_DISK_NAME = msg.splitlines()[-1].split()[0].strip()

        # Step 1: inject the disk-full fault on the primary via cfe.
        logger.info("----pri disk full----")
        inject_cmd = f'{macro.CFE_PATH}/cfe "inject  rfile_full (diskname) ' \
                     f'values ({self.DB_DISK_NAME})"'
        tmp = self.dbPrimaryRootNode.sh(inject_cmd).result()
        logger.info(tmp)

        # Step 2: switchover from standby1 must fail while the disk is full.
        switchover_cmd = f"source {macro.DB_ENV_PATH}; " \
                         f"gs_ctl switchover -D {macro.DB_INSTANCE_PATH} " \
                         f"-m fast"
        logger.info("----standby1 switchover----")
        switchover_msg = self.dbStandby1UserNode.sh(switchover_cmd).result()
        logger.info(switchover_msg)
        self.assertIn(self.SWITCHOVER_FAIL_MSG, switchover_msg)

        # Step 3: free the disk again.
        logger.info("----pri clean disk----")
        self._clean_disk_fault()

        # Step 4: restart the whole cluster from the primary node.
        logger.info("----restart cluster----")
        self.assertTrue(commonshpri.stopDbCluster())
        self.assertTrue(commonshpri.startDbCluster())

        # Step 5: switchover must now succeed; refresh the static
        # configuration so gs_om reflects the new roles.
        logger.info("----standby1 refreshconf----")
        switchover_cmd = f'source {macro.DB_ENV_PATH}; ' \
                         f'gs_ctl switchover  -D {macro.DB_INSTANCE_PATH} ' \
                         f'-m fast; ' \
                         f'gs_om -t refreshconf'
        logger.info(switchover_cmd)
        switchover_msg = self.dbStandby1UserNode.sh(switchover_cmd).result()
        logger.info(switchover_msg)
        self.assertIn(self.SWITCH_SUCCESS_MSG, switchover_msg)
        self.assertIn(self.REFRESHCONF_SUCCESS_MSG, switchover_msg)

    def tearDown(self):
        logger.info('----this is tearDown----')
        # Best-effort fault cleanup on the primary (skipped when the test
        # failed before the disk name was detected).
        logger.info("----clean disk----")
        self._clean_disk_fault()

        # Restore the original primary/standby roles if the test left the
        # cluster switched over.
        logger.info('----get primary hostname----')
        hostname = self.dbPrimaryRootNode.sh('hostname').result()
        logger.info(hostname)

        if commonshpri.check_whether_need_switch(hostname):
            logger.info('----switchover restore original primary----')
            switchover_cmd = f'source {macro.DB_ENV_PATH}; ' \
                             f'gs_ctl switchover  -D ' \
                             f'{macro.DB_INSTANCE_PATH}; ' \
                             f'gs_om -t refreshconf'
            switchover_msg = self.dbPrimaryUserNode.sh(switchover_cmd).result()
            logger.info(switchover_msg)
            self.assertIn(self.SWITCH_SUCCESS_MSG, switchover_msg)
            self.assertIn(self.REFRESHCONF_SUCCESS_MSG, switchover_msg)