"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.

openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:

          http://license.coscl.org.cn/MulanPSL2

THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : 故障&可靠性测试
Case Name   : 磁盘满时创建修改删除schema
Create At   : 2021/01/27
@zou_jialiang0505328126
Description :
    1.注入磁盘满故障
    2.创建schema
    3.清理磁盘故障
    4.创建schema
    5.注入磁盘满
    6.修改schema
    7.清理磁盘故障
    8.修改schema
    9.注入磁盘满
    10.删除schema
    11.清理磁盘故障
    12.删除schema
Expect      :
    1.故障注入成功
    2.创建失败
    3.清理成功
    4.创建成功
    5.故障注入成功
    6.修改失败
    7.清理成功
    8.修改成功
    9.注入故障成功
    10.删除失败
    11.清理磁盘成功
    12.删除成功
History     :
    Modified by @zou_jialiang050 2022/04/26 : 使用CFE注入磁盘满故障后start数据库失败，
                                        优化脚本先stop再start数据库
"""

import unittest
import os
from yat.test import macro
from yat.test import Node
from testcase.utils.CommonSH import CommonSH
from testcase.utils.CommonSH import RestartDbCluster
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger


class Ddldatabase(unittest.TestCase):
    """Disk-full fault injection around schema DDL (create/alter/drop).

    For each DDL statement the test injects a disk-full fault on the
    primary's data disk via CFE, verifies the statement fails, clears
    the fault (stop/start-ing the cluster if it went down — see the
    2022/04/26 history note) and verifies the statement then succeeds.
    """
    # Shell helper bound to the primary node's database user.
    commonshpri = CommonSH('PrimaryDbUser')
    # Cluster nodes restarted by the RestartDbCluster decorator.
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Prepare logger, root-node handle and test fixtures."""
        self.log = Logger()
        self.log.info(f'-----{os.path.basename(__file__)} start-----')
        self.db_primary_root_node = Node(node='PrimaryRoot')
        self.constant = Constant()
        # Resolved by the test from `df` output; tearDown only attempts
        # fault cleanup when it is non-empty.
        self.db_disk_name = ''
        self.schema_name = 'test_schema'

    def _inject_disk_full(self):
        """Inject a disk-full fault on the primary's data disk via CFE."""
        cfedisk_cmd = f'{macro.CFE_PATH}/cfe ' \
            f'"inject  rfile_full (diskname) values  ({self.db_disk_name})"'
        self.log.info(cfedisk_cmd)
        result = self.db_primary_root_node.sh(cfedisk_cmd).result()
        self.log.info(result)

    def _clean_disk_full(self):
        """Clear the disk-full fault; restart the cluster if it is down."""
        # NOTE(review): the original command embedded a literal newline and
        # source indentation inside the quoted cfe argument (inconsistently
        # across its four copies); a single-line form is used here on the
        # assumption cfe ignores extra whitespace — confirm against cfe docs.
        cfedisk_cmd = f'{macro.CFE_PATH}/cfe ' \
            f'"clean  rfile_full  where(diskname={self.db_disk_name})"'
        tmp = self.db_primary_root_node.sh(cfedisk_cmd).result()
        self.log.info(tmp)

        self.log.info('------判断数据库是否需重启------')
        result = self.commonshpri.getDbClusterStatus('status')
        if not result:
            # After a CFE disk-full fault the cluster may fail to start
            # directly; stop first, then start (see file history note).
            result = self.commonshpri.stopDbCluster()
            self.log.info(result)
            result = self.commonshpri.startDbCluster()
            self.log.info(result)

    def _assert_ddl_fails(self, sql):
        """Run *sql* and assert it failed because of the disk-full fault."""
        result = self.commonshpri.executDbSql(sql)
        self.log.info(result)
        # Depending on timing, the failure surfaces either as a lost
        # connection or as an explicit disk-full error message.
        flag = (self.constant.FAILED_CONNECT_DB in result
                or self.constant.DISK_FULL_MSG in result)
        self.assertTrue(flag)

    def test_basebackup(self):
        """Schema DDL must fail under disk-full and succeed after cleanup."""
        self.log.info('------1.使用CFE注入磁盘满故障------')
        self.log.info('======获取主节点磁盘名======')
        cmd = f"df -h {macro.DB_INSTANCE_PATH}"
        self.log.info(cmd)
        msg = self.db_primary_root_node.sh(cmd).result()
        self.log.info(msg)
        # Last df line, first column: the device backing the instance path.
        self.db_disk_name = msg.splitlines()[-1].split()[0].strip()
        self._inject_disk_full()

        self.log.info('------2.创建schema------')
        self._assert_ddl_fails(f"create schema {self.schema_name};")

        self.log.info('------3.清除故障------')
        self._clean_disk_full()

        self.log.info('------4.创建schema------')
        result = self.commonshpri.executDbSql(
            f"create schema {self.schema_name};")
        self.log.info(result)
        self.assertIn(self.constant.create_schema_success_msg, result)

        self.log.info('------5.使用CFE注入磁盘满故障------')
        self._inject_disk_full()

        self.log.info('------6.修改schema------')
        self._assert_ddl_fails(
            f"alter schema {self.schema_name}"
            f" rename to {self.schema_name}_new;")

        self.log.info('------7.清除故障------')
        self._clean_disk_full()

        self.log.info('------8.修改schema------')
        result = self.commonshpri.executDbSql(
            f"alter schema {self.schema_name}"
            f" rename to {self.schema_name}_new;")
        self.log.info(result)
        self.assertIn(self.constant.alter_schema_success_msg, result)

        self.log.info('------9.使用CFE注入磁盘满故障------')
        self._inject_disk_full()

        self.log.info('------10.删除schema------')
        self._assert_ddl_fails(f"drop schema {self.schema_name}_new;")

        self.log.info('------11.清除故障------')
        self._clean_disk_full()

        self.log.info('------12.删除schema------')
        result = self.commonshpri.executDbSql(
            f"drop schema {self.schema_name}_new;")
        self.log.info(result)
        self.assertIn(self.constant.drop_schema_success_msg, result)

    def tearDown(self):
        """Best-effort fault cleanup and schema removal."""
        self.log.info('------环境清理------')
        self.log.info('------清除故障------')
        # If the test failed before the disk name was resolved, a clean
        # command with an empty diskname would be malformed — skip it.
        if self.db_disk_name:
            self._clean_disk_full()

        self.log.info('------删除schema------')
        result = self.commonshpri.executDbSql(
            f"drop schema if exists {self.schema_name} cascade;"
            f"drop schema if exists {self.schema_name}_new cascade;")
        self.log.info(result)
        self.log.info(f'-----{os.path.basename(__file__)} end-----')
