"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.

openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:

          http://license.coscl.org.cn/MulanPSL2

THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : 硬件故障类--磁盘
Case Name   : pg_log日志记录超大导致磁盘空间占满
Create At   : 2020-11-02
Owner       : n@ningyali
Description :
    1、注入故障，将磁盘空间写到剩余400M
    2、重复执行dml操作，生成pg_log，直至磁盘满
    3、清除故障，执行dml操作
Expect      :
    1、注入故障成功，磁盘空间剩余400M
    2、pg_log日志大小占满磁盘后，dml执行失败，有相应磁盘满提示
    3、清除故障后，dml执行成功
History     :
    2021/1/20 n@ningyali 删除pg_log修改为移除后还原
    2021/7/5 n@ningyali 增加stop集群、查询状态、查看pid文件和查看进程以分析start集群失败问题
"""

import unittest

from yat.test import Node
from yat.test import macro

from testcase.utils.Common import *
from testcase.utils.CommonSH import *
from testcase.utils.Logger import *


class BigPGLogAndDiskFull(unittest.TestCase):
    """
    Hardware-fault reliability case: oversized pg_log fills the disk.

    Flow:
        1. Inject a disk-full fault on the disk holding the data directory.
        2. Repeatedly run DML (with verbose logging GUCs enabled) so pg_log
           grows until the disk is full; DML must then fail with a
           disk-full message.
        3. Clear the fault, free the pg_log space, restart the cluster and
           verify DML succeeds again.
    """

    # Project shell helpers bound to named cluster nodes; `nodes_tuple`
    # feeds the RestartDbCluster decorator below.
    primary_sh = CommonSH('PrimaryDbUser')
    standby1_sh = CommonSH('Standby1DbUser')
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Build node handles/helpers and record which disk hosts the data dir."""
        self.log = Logger()
        self.log.info('----Opengauss_Reliability_Hardware_Case013:开始执行----')
        self.primary_root_node = Node(node='PrimaryRoot')
        self.primary_user_node = Node(node='PrimaryDbUser')
        self.standby1_root_node = Node(node='Standby1Root')
        self.standby1_user_node = Node(node='Standby1DbUser')
        self.com = Common()
        self.Constant = Constant()
        self.node_num = self.com.get_node_num(self.primary_root_node)
        # Upper bound (seconds) for the primary/standby sync-consistency wait.
        self.max_time = 1200
        self.t_name = 'test_hardware013'
        # pg_log subdirectory for this DN; assumes DN_NODE_NAME is a
        # '/'-separated value whose first segment names the node dir --
        # TODO confirm against macro definition.
        self.pg_log_path = os.path.join(macro.PG_LOG_PATH,
                                        macro.DN_NODE_NAME.split('/')[0])

        self.log.info("----获取主节点数据目录所在磁盘----")
        cmd = f"df -h {macro.DB_INSTANCE_PATH}"
        self.log.info(cmd)
        msg = self.primary_root_node.sh(cmd).result()
        self.log.info(msg)
        # Device name = first column of the last `df` output line; used by
        # the fault-injection tool to target that disk.
        self.db_disk_name = msg.splitlines()[-1].split()[0].strip()

    def test_main(self):
        """Run the disk-full scenario end to end (inject, fill, clean, recover)."""
        self.log.info("----设置配置文件中相关参数----")
        # Log a message on every session connection.
        msg = self.primary_sh.executeGsguc('set',
                                           self.Constant.GSGUC_SUCCESS_MSG,
                                           'log_connections=on')
        self.assertTrue(msg)
        # Log a message on every session disconnection.
        msg = self.primary_sh.executeGsguc('set',
                                           self.Constant.GSGUC_SUCCESS_MSG,
                                           'log_disconnections=on')
        self.assertTrue(msg)
        # Record every SQL statement in the log (maximises pg_log growth).
        msg = self.primary_sh.executeGsguc('set',
                                           self.Constant.GSGUC_SUCCESS_MSG,
                                           "log_statement=all")
        self.assertTrue(msg)

        self.log.info("----重启集群，使以上设置生效----")
        is_stopped = self.primary_sh.stopDbCluster()
        self.assertTrue(is_stopped)
        is_started = self.primary_sh.startDbCluster()
        self.assertTrue(is_started)

        self.log.info("----建表----")
        create_table_cmd = f'''drop table if exists {self.t_name}; 
                    create table {self.t_name}(id int);'''
        self.log.info(create_table_cmd)
        msg = self.primary_sh.executDbSql(create_table_cmd)
        self.log.info(msg)
        for error_msg in self.Constant.SQL_WRONG_MSG:
            self.assertNotIn(error_msg, msg)

        self.log.info("----注入磁盘满故障----")
        # CFE fault-injection syntax is tool-specific; presumably fills the
        # named disk so that only ~400 MB remain -- verify against the tool docs.
        cfe_command = f'rfile_full (diskname) values ({self.db_disk_name})'
        cfe_msg = self.com.cfe_inject(self.primary_root_node, cfe_command)
        self.log.info(cfe_msg)
        self.assertIn(self.Constant.CFE_DISK_FULL_SUCCESS_MSG, cfe_msg)

        self.log.info("----重复执行dml操作，生成pg_log日志----")
        # Hard cap of 10000 iterations; normally the disk fills long before
        # the loop is exhausted and we `break` out early.
        for i in range(10000):
            self.log.info("----查看pg_log大小----")
            shell_cmd = f"du {self.pg_log_path}"
            self.log.info(shell_cmd)
            msg = self.primary_user_node.sh(shell_cmd).result()
            self.log.info(msg)

            self.log.info("----查看磁盘剩余可用空间----")
            avail_size = self.com.getAvailSize(self.primary_root_node,
                                               macro.DB_INSTANCE_PATH)
            self.log.info('Available:' + str(avail_size))

            self.log.info(f"----第{i}次：重复执行dml操作----")
            insert_cmd = f'''
                insert into {self.t_name} values (generate_series(1,10000));
                update {self.t_name} set id=id+1;
                select count(*) from {self.t_name};
                '''
            dml_result = self.primary_sh.executDbSql(insert_cmd)
            self.log.info('sqlexecute result: ' + dml_result)

            # Stop once DML reports disk-full, or `df` reports zero bytes
            # free (avail_size is compared as a string -- it comes back
            # from the shell helper as text).
            if self.Constant.DISK_FULL_MSG in dml_result or avail_size == '0':
                self.log.info('磁盘已满')
                break

        self.log.info("----pg_log占满磁盘后数据库无法写入，dml执行失败，有相应磁盘满提示----")
        insert_cmd = f"insert into {self.t_name} values (10001);" \
            f"update {self.t_name} set id=id+1; " \
            f"select count(*) from {self.t_name};"
        dml_result = self.primary_sh.executDbSql(insert_cmd)
        self.log.info('sqlexecute result: ' + dml_result)
        # A full disk may stop the primary node, making the DB connection
        # itself fail; only assert the disk-full message when we could connect.
        if self.Constant.FAILED_CONNECT_DB not in dml_result:
            self.assertIn(self.Constant.DISK_FULL_MSG, dml_result)

        self.log.info("----清除故障----")
        cfe_command = f"rfile_full where (diskname={self.db_disk_name})"
        cfe_msg = self.com.cfe_clean(self.primary_root_node, cfe_command)
        self.log.info(cfe_msg)
        self.assertIn(self.Constant.CFE_DISK_CLEAN_SUCCESS_MSG, cfe_msg)

        self.log.info("----------------查看磁盘剩余可用空间-----------------")
        avail_size = self.com.getAvailSize(self.primary_root_node,
                                           macro.DB_INSTANCE_PATH)
        self.log.info('Available:' + avail_size)

        self.log.info("-----------删除pg_log文件-----------")
        # Move (not delete) pg_log contents to a backup dir so tearDown can
        # restore them (see 2021/1/20 history note in the module header).
        pg_log_bak_path = os.path.join(macro.DB_BACKUP_PATH, 'pg_log')
        shell_cmd = f"mkdir {pg_log_bak_path}; " \
            f"mv {os.path.join(self.pg_log_path, '*')} {pg_log_bak_path}; " \
            f"ls -l {self.pg_log_path}"
        self.log.info(shell_cmd)
        msg = self.primary_root_node.sh(shell_cmd).result()
        self.log.info(msg)

        self.log.info("----查看磁盘剩余可用空间----")
        avail_size = self.com.getAvailSize(self.primary_root_node,
                                           macro.DB_INSTANCE_PATH)
        self.log.info('Available:' + avail_size)

        # Diagnostics below (status / pid file / process list) were added to
        # analyse cluster-start failures (see 2021/7/5 history note).
        self.log.info("----查看数据库集群状态----")
        self.log.info(self.primary_sh.getDbClusterStatus('status'))
        query_msg = self.primary_sh.executeGsctl('query', '', get_detail=True)
        self.log.info(query_msg)

        self.log.info("----查看pid文件及进程----")
        pid_file_path = os.path.join(macro.DB_INSTANCE_PATH,
                                     macro.PID_FILE_NAME)
        self.log.info(
            self.primary_user_node.sh(f'ls -al {pid_file_path}').result())
        self.log.info(self.primary_user_node.sh(
            'ps ux|grep gaussdb|grep -v grep').result())

        self.log.info("----磁盘满可能导致数据库主节点stop，重启集群以恢复----")
        # Immediate-mode stop: the primary may already be down/unresponsive.
        is_stopped = self.primary_sh.stopDbCluster(command='-m immediate')
        self.log.info(is_stopped)
        is_started = self.primary_sh.startDbCluster()
        self.log.info(is_started)

        self.log.info("----查看主机query，同步是否正常----")
        # Wait up to max_time for primary/standby replication consistency.
        self.primary_sh.check_location_consistency('primary',
                                                   self.node_num,
                                                   self.max_time)

        self.assertTrue(self.primary_sh.getDbClusterStatus('status'))

        self.log.info("----清除故障后dml执行成功----")
        self.log.info(insert_cmd)
        msg = self.primary_sh.executDbSql(insert_cmd)
        self.log.info(msg)
        for error_msg in self.Constant.SQL_WRONG_MSG:
            self.assertNotIn(error_msg, msg)

    def tearDown(self):
        """Clear the fault, restore pg_log, drop the table and reset GUCs.

        NOTE(review): relies on attributes set in setUp (e.g.
        self.db_disk_name); if setUp failed before the `df` step this
        tearDown would raise AttributeError -- confirm whether the
        framework guarantees setUp completion here.
        """
        self.log.info("----清除故障----")
        # Re-issue the fault clean: harmless if test_main already cleaned it,
        # essential if the test aborted before reaching its clean step.
        cfe_command = f"rfile_full where (diskname={self.db_disk_name})"
        cfe_msg = self.com.cfe_clean(self.primary_root_node, cfe_command)
        self.log.info(cfe_msg)
        self.assertIn(self.Constant.CFE_DISK_CLEAN_SUCCESS_MSG, cfe_msg)

        self.log.info("----还原pg_log----")
        # Move the backed-up log files back into the live pg_log directory.
        pg_log_bak_path = os.path.join(macro.DB_BACKUP_PATH, 'pg_log')
        shell_cmd = f"mv {os.path.join(pg_log_bak_path, '*')} " \
            f"{self.pg_log_path}"
        self.log.info(shell_cmd)
        msg = self.primary_root_node.sh(shell_cmd).result()
        self.log.info(msg)

        self.log.info("----删除表----")
        drop_table_cmd = f'''drop table if exists {self.t_name}; '''
        self.log.info(drop_table_cmd)
        msg = self.primary_sh.executDbSql(drop_table_cmd)
        self.log.info(msg)

        self.log.info("----查看磁盘剩余可用空间----")
        avail_size = self.com.getAvailSize(self.primary_root_node,
                                           macro.DB_INSTANCE_PATH)
        self.log.info('Available:' + avail_size)

        self.log.info("----还原默认值----")
        # Restore the three logging GUCs changed in test_main to defaults.
        self.primary_sh.executeGsguc('set', self.Constant.GSGUC_SUCCESS_MSG,
                                     'log_connections=off')
        self.primary_sh.executeGsguc('set', self.Constant.GSGUC_SUCCESS_MSG,
                                     "log_disconnections=off")
        self.primary_sh.executeGsguc('set', self.Constant.GSGUC_SUCCESS_MSG,
                                     'log_statement=none')

        # Restart so the restored GUC values take effect.
        self.primary_sh.stopDbCluster()
        self.primary_sh.startDbCluster()

        self.log.info('----Opengauss_Reliability_Hardware_Case013:执行完成----')
