"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.

openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:

          http://license.coscl.org.cn/MulanPSL2

THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : 故障&可靠性测试
Case Name   : 事务提交方式不为remote_apply,备节点注入磁盘满故障，清除故障时间大于延迟时间
Create At   : 2021/03/01
@zou_jialiang0505328126
Description :
    1.设置synchronous_commit=on,synchronous_standby_names=dn_6002,dn_6003
    2.使用gs_guc set方式设置recovery_min_apply_delay
    3.重启数据库
    4.查询集群同步方式
    5.等待主备一致
    6.创建表，并插入数据
    7.备节点1注入磁盘满
    8.等待20s清除故障
    9.重启集群
    10.查询备节点
    11.插入数据
    12.备节点1,2均注入磁盘满故障
    13.等待20s清除故障
    14.重启集群
    15.等待查询备节点
Expect      :
    1.设置成功
    2.设置成功
    3.重启成功
    4.集群为同步
    5.主备一致
    6.创建成功
    7.故障注入成功
    8.清除故障成功
    9.start时可能集群状态为degrade
    10.主备同步
    11.插入数据成功
    12.注入故障成功
    13.清除故障成功
    14.start时可能集群状态为degrade
    15.主备同步
History     :
"""

import unittest
import os
import time
import datetime
from yat.test import macro
from yat.test import Node
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Common import Common
from testcase.utils.CommonSH import RestartDbCluster
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger


class RecoveryDelay(unittest.TestCase):
    """Reliability case (Case242).

    With synchronous_commit != remote_apply and recovery_min_apply_delay
    configured, inject disk-full faults on the standby nodes via the cfe
    tool, clear the faults after the fault window, restart the cluster
    (a Degraded state is tolerated on start) and verify that both
    standbys eventually replay the primary's data.
    """

    # Shared shell helper bound to the primary node's DB user.
    commonshpri = CommonSH('PrimaryDbUser')
    # Node aliases handed to the RestartDbCluster decorator.
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')
    # recovery_min_apply_delay applied during the case, in seconds.
    APPLY_DELAY_S = 150
    # Margin added on top of the apply delay before querying standbys.
    WAIT_MARGIN_S = 30

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Build node handles, sync clocks and record standby disk names."""
        self.log = Logger()
        self.log.info('----Opengauss_Reliability_Dbsys_Case242.py 开始执行------')
        self.db_primary_root_node = Node(node='PrimaryRoot')
        self.db_primary_user_node = Node(node='PrimaryDbUser')
        self.comsta = [Common('Standby1Root'), Common('Standby2Root')]
        self.db_standby_node = \
            [Node(node='Standby1Root'), Node(node='Standby2Root')]
        self.comshsta = \
            [CommonSH('Standby1DbUser'), CommonSH('Standby2DbUser')]
        self.constant = Constant()
        self.db_disk_name = []
        # NOTE: renamed from 'tb_241' so this Case242 file cannot clash
        # with Case241's table when both run against the same cluster.
        self.tb_name = 'tb_242'
        self.conf_path = os.path.join(
            macro.DB_INSTANCE_PATH, macro.DB_PG_CONFIG_NAME)

        self.log.info('------同步集群时间--------')
        # Propagate the primary's wall clock to both standbys so the
        # apply-delay timing measured below is consistent cluster-wide.
        current = self.db_primary_root_node.sh(
            "date \"+%m/%d/%Y %H:%M:%S\"").result()
        self.log.info(current)
        datecmd = f'date -s "{current}"'
        self.log.info(datecmd)
        for standby in self.db_standby_node:
            result = standby.sh(datecmd).result()
            self.log.info(result)

        self.log.info('================获取备节点磁盘名=========')
        # Disk names feed the cfe disk-full inject/clean commands below.
        for com in self.comsta:
            self.db_disk_name.append(
                com.get_disk_name(macro.DB_INSTANCE_PATH))

    def _inject_disk_full(self, idx):
        """Inject a disk-full fault on standby *idx* via the cfe tool."""
        cfedisk_cmd = f'{macro.CFE_PATH}/cfe ' \
            f'"inject  rfile_full (diskname) ' \
            f'values  ({self.db_disk_name[idx]})"'
        self.log.info(cfedisk_cmd)
        result = self.db_standby_node[idx].sh(cfedisk_cmd).result()
        self.log.info(result)

    def _clean_disk_full(self, idx):
        """Clear a previously injected disk-full fault on standby *idx*.

        Single-line command: the original triple-quoted variants embedded
        a raw newline and indentation inside the quoted cfe argument.
        """
        cfedisk_cmd = f'{macro.CFE_PATH}/cfe "clean rfile_full ' \
            f'where(diskname={self.db_disk_name[idx]})"'
        tmp = self.db_standby_node[idx].sh(cfedisk_cmd).result()
        self.log.info(tmp)

    def _restart_cluster_tolerate_degrade(self):
        """Stop then start the cluster; accept Normal or Degraded on start.

        Returns the raw start output so callers can branch on 'Degraded'.
        """
        self.log.info('----------重启数据库-----------')
        result = self.commonshpri.stopDbCluster()
        self.log.info(result)
        result = self.commonshpri.startDbCluster(True)
        flg = 'Degraded' in result \
            or self.constant.START_SUCCESS_MSG in result
        self.assertTrue(flg)
        return result

    def test_recoverydelay(self):
        """Run the disk-full / delayed-apply recovery scenario end to end."""
        # Total time standbys may lag before we query them: the configured
        # apply delay plus a safety margin.
        wait_total = self.APPLY_DELAY_S + self.WAIT_MARGIN_S

        self.log.info('------备份postgres.conf文件----------')
        shell_cmd = f"cp {self.conf_path} {self.conf_path}_testbak"
        self.log.info(shell_cmd)
        result = self.db_primary_user_node.sh(shell_cmd).result()
        self.log.info(result)

        self.log.info('--------设置synchronous_commit=on-------')
        result = self.commonshpri.executeGsguc(
            'set', self.constant.GSGUC_SUCCESS_MSG,
            'synchronous_commit=on')
        self.assertTrue(result)

        self.log.info('---设置synchronous_standby_names=dn_6002,dn_6003--')
        shell_cmd = f"cat {self.conf_path} | " \
            f"grep synchronous_standby_names"
        result = self.db_primary_user_node.sh(shell_cmd).result()
        self.log.info(result)
        # Replace the existing (possibly commented-out) setting in place.
        shell_cmd = f"sed -i \"s/" \
            f"{result.split('#')[0]}/synchronous_standby_names='" \
            f"{macro.DN_NODE_NAME.split('/')[1]}'" \
            f"/g\" {self.conf_path}"
        self.log.info(shell_cmd)
        result_tmp = self.db_primary_user_node.sh(shell_cmd).result()
        self.log.info(result_tmp)

        self.log.info(
            f'-----设置recovery_min_apply_delay={self.APPLY_DELAY_S}s----')
        result = self.commonshpri.executeGsguc(
            'set', self.constant.GSGUC_SUCCESS_MSG,
            f'recovery_min_apply_delay={self.APPLY_DELAY_S}s')
        self.assertTrue(result)

        # First restart must succeed cleanly (no Degraded tolerance yet).
        self.log.info('----------重启数据库-----------')
        result = self.commonshpri.stopDbCluster()
        self.assertTrue(result)
        result = self.commonshpri.startDbCluster()
        self.assertTrue(result)

        self.log.info('-----------查询参数-----------')
        result = self.commonshpri.executDbSql('show synchronous_commit;')
        self.log.info(result)
        self.assertIn('on', result)
        result = self.commonshpri.executDbSql(
            'show recovery_min_apply_delay;')
        self.log.info(result)
        self.assertIn(f'{self.APPLY_DELAY_S}s', result)
        result = self.commonshpri.executDbSql(
            'show synchronous_standby_names;')
        self.log.info(result)
        self.assertIn(macro.DN_NODE_NAME.split('/')[1], result)

        self.log.info('--------查询集群同步方式-----')
        sql = "select * from pg_stat_replication;"
        result = self.commonshpri.executDbSql(sql)
        self.log.info(result)
        # One standby replicates synchronously, the other asynchronously.
        self.assertIn('Sync', result)
        self.assertIn('Async', result)

        self.log.info('--------等待主备一致------------')
        for comsh in self.comshsta:
            self.assertTrue(comsh.check_data_consistency())

        self.log.info('--------创建表，并插入数据-------')
        sql = f"drop table if exists {self.tb_name};" \
            f"create table {self.tb_name}(i int, s char(10));"
        result = self.commonshpri.executDbSql(sql)
        self.log.info(result)
        self.assertIn(self.constant.TABLE_CREATE_SUCCESS, result)

        self.log.info('-------------备节点1注入磁盘满---------------')
        start = datetime.datetime.now()
        self._inject_disk_full(0)

        self.log.info('----------------等待20s清除故障------------')
        time.sleep(20)
        self._clean_disk_full(0)

        time.sleep(5)
        result = self._restart_cluster_tolerate_degrade()
        execute_time = (datetime.datetime.now() - start).seconds
        self.log.info(execute_time)

        self.log.info('----------查询备节点-------------------')
        if 'Degraded' in result:
            # Wait out the remaining apply-delay window; clamp at 0 —
            # time.sleep() raises ValueError on a negative argument.
            time.sleep(max(0, wait_total - execute_time))
        sql = f"select sysdate;select * from {self.tb_name};"
        for comsh in self.comshsta:
            result = comsh.executDbSql(sql)
            self.log.info(result)
            # Table was created empty; standbys must have replayed it.
            self.assertIn('(0 rows)', result)

        self.log.info('---------------插入数据------------------')
        sql = f"insert into {self.tb_name} values(1,'test');"
        result = self.commonshpri.executDbSql(sql)
        self.log.info(result)
        self.assertIn('INSERT', result)

        self.log.info('-------------备节点均注入故障---------------')
        for i in range(2):
            self._inject_disk_full(i)

        self.log.info('--------------等待20s清除故障----------------------')
        start = datetime.datetime.now()
        time.sleep(20)
        for i in range(2):
            self._clean_disk_full(i)

        result = self._restart_cluster_tolerate_degrade()
        execute_time = (datetime.datetime.now() - start).seconds
        self.log.info(execute_time)

        self.log.info('----------查询备节点-------------------')
        if 'Degraded' in result:
            time.sleep(max(0, wait_total - execute_time))
        sql = f"select sysdate;select * from {self.tb_name};"
        for comsh in self.comshsta:
            result = comsh.executDbSql(sql)
            self.log.info(result)
            self.assertIn('1 | test', result)

    def tearDown(self):
        """Best-effort cleanup: clear faults, restore config, drop the table."""
        self.log.info('-----------------------------环境清理--------------------')
        self.log.info('----------------------清除故障---------------------------')
        # Iterate only over disks actually recorded, in case setUp aborted
        # before populating both entries.
        for i in range(len(self.db_disk_name)):
            self._clean_disk_full(i)

        self.log.info('--------------还原配置文件-------------')
        shell_cmd = f"rm -rf {self.conf_path};" \
            f"cp {self.conf_path}_testbak {self.conf_path};" \
            f"rm -rf {self.conf_path}_testbak"
        self.log.info(shell_cmd)
        result = self.db_primary_user_node.sh(shell_cmd).result()
        self.log.info(result)

        self.log.info('-------------还原recovery_min_apply_delay----------')
        result = self.commonshpri.executeGsguc(
            'set', self.constant.GSGUC_SUCCESS_MSG,
            'recovery_min_apply_delay=0')
        self.log.info(result)

        self.log.info('-----------重启数据库-----------')
        result = self.commonshpri.stopDbCluster()
        self.log.info(result)
        result = self.commonshpri.startDbCluster()
        self.log.info(result)

        time.sleep(5)
        self.log.info('--------删除表-------')
        sql = f"drop table if exists {self.tb_name};"
        result = self.commonshpri.executDbSql(sql)
        self.log.info(result)
        self.log.info('---Opengauss_Reliability_Dbsys_Case242.py 执行结束-------')