'''

Case Type： 数据库系统
Case Name： 主节点数据恢复过程中，备节点磁盘不足
Create At:  2020/07/25
@zou_jialiang0505328126
Description:  1.主节点进行数据恢复2.数据恢复过程中，备节点注入磁盘满故障3.恢复故障4.重启数据库5.查询数据库状态6.主节点数据恢复7.备节点执行查询指令，查询数据是否一致,对比每个表内容

history：
created： 2020/07/25  created by @wan005
'''
import os
import unittest
from yat.test import Node
import time
import threading
from yat.test import macro
import sys

sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.Common import Common
from testcase.utils.CommonSH import *

# Module-level helpers shared by every test method in this file.
logger = Logger()  # project logging wrapper
common = Common()  # common utilities (TPCC driver, SQL-result comparison)
# Shell helpers bound to specific cluster nodes declared in the yat config:
# primary node and the second standby (used for consistency checks).
commonshpri = CommonSH('PrimaryDbUser')
commonshsta2 = CommonSH('Standby2DbUser')


class Disk_full(unittest.TestCase):
    """Reliability case: standby disk-full fault during primary data restore.

    Scenario (see module docstring):
      1. Restore dumped data on the primary node.
      2. While restoring, inject a disk-full fault on standby node 1.
      3. Clear the fault, restart the database and check cluster status.
      4. Restore the data on the primary again.
      5. Query from standby node 2 and compare every table's contents
         against the primary to verify data consistency.
    """

    # Cluster node handles (endpoints declared in the yat environment config).
    dbPrimaryUserNode = Node(node='PrimaryDbUser')
    dbPrimaryRootNode = Node(node='PrimaryRoot')
    dbStandby1UserNode = Node(node='Standby1DbUser')
    dbStandby1RootNode = Node(node='Standby1Root')
    dbStandby2UserNode = Node(node='Standby2DbUser')
    dbStandby2RootNode = Node(node='Standby2Root')
    # Paths and expected-message constants from the shared macro/Constant tables.
    DB_INSTANCE_PATH = macro.DB_INSTANCE_PATH
    DB_ENV_PATH = macro.DB_ENV_PATH
    PRIMARY_NORMAL = Constant.PRIMARY_NORMAL
    DB_POSTMASTER_PID_NAME = macro.DB_POSTMASTER_PID_NAME
    CREATE_TABLE_SUCCESS = Constant.CREATE_TABLE_SUCCESS
    DROP_TABLE_SUCCESS = Constant.DROP_TABLE_SUCCESS
    dump_file_name = 'test_pid.tar'  # gs_dump archive created in setUp
    CFE_PATH = macro.CFE_PATH        # fault-injection tool (cfe) install path
    DB_DISK_NAME = ''                # filled in by test_disk_full from `df -h`
    DISK_FULL_MSG = Constant.DISK_FULL_MSG
    RESTORE_SUCCESS_MSG = Constant.RESTORE_SUCCESS_MSG
    RESTART_SUCCESS_MSG = Constant.RESTART_SUCCESS_MSG
    TPCC_CREATE_DATA_SUCCESS_MSG = Constant.TPCC_CREATE_DATA_SUCCESS_MSG
    TPCC_PATH = macro.TPCC_PATH
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Build a fresh TPCC data set and dump the primary DB to a tar file.

        Destroys any previous TPCC data, rebuilds it, then runs gs_dump so
        the test method has an archive to restore from.
        """
        logger.info("-----------this is setup-----------")
        logger.info('------------Opengauss_Reliability_Dbsys_Case135.py start------------')

        logger.info("-----------create tpcc data-----------")
        destroy_shell = ". runDatabaseDestroy.sh props.pg"
        destroy_result = common.startTPCC(self.dbPrimaryRootNode, self.TPCC_PATH, destroy_shell)
        logger.info(destroy_result)

        # Rebuild the TPCC schema/data; this output carries the success marker.
        build_shell = ". runDatabaseBuild.sh props.pg"
        build_result = common.startTPCC(self.dbPrimaryRootNode, self.TPCC_PATH, build_shell)
        logger.info(build_result)
        self.assertTrue(build_result.find(self.TPCC_CREATE_DATA_SUCCESS_MSG) > -1)

        logger.info('--------------------dump file-------------------------')
        # Dump the whole database in tar format (-F t) into the instance dir.
        dumpCmd = '''
            source {source_path};
            gs_dump {dbname} -p {port} -f {file_name} -F t'''.format(source_path=self.DB_ENV_PATH,
                                                                     dbname=self.dbPrimaryUserNode.db_name,
                                                                     port=self.dbPrimaryUserNode.db_port,
                                                                     file_name=os.path.join(self.DB_INSTANCE_PATH,
                                                                                            self.dump_file_name))
        logger.info(dumpCmd)
        dumpMsg = self.dbPrimaryUserNode.sh(dumpCmd).result()
        logger.info(dumpMsg)
        flag = 'dump database ' + self.dbPrimaryUserNode.db_name + ' successfully'
        self.assertTrue(dumpMsg.find(flag) > -1)

    def test_disk_full(self):
        """Inject a disk-full fault on a standby mid-restore, recover, and
        verify the cluster returns to normal with consistent data."""
        logger.info('--------------------restore file-------------------------')
        restore_result = commonshpri.restoreFile(os.path.join(self.DB_INSTANCE_PATH, self.dump_file_name))
        logger.info(restore_result)
        self.assertTrue(restore_result.find(self.RESTORE_SUCCESS_MSG) > -1)

        logger.info("-----------get standby disk name -----------")
        # Last df line, first column = device holding the DB instance path.
        cmd = f"df -h {macro.DB_INSTANCE_PATH}"
        logger.info(cmd)
        msg = self.dbStandby1RootNode.sh(cmd).result()
        logger.info(msg)
        self.DB_DISK_NAME = msg.splitlines()[-1].split()[0].strip()

        logger.info("-----------standby disk full -----------")
        # cfe "inject rfile_full" simulates a full disk on the standby device.
        cfediskCmd = '''
            {cfe_path}/cfe  "inject  rfile_full (diskname)  values  ({diskname})"'''.format(cfe_path=self.CFE_PATH,
                                                                                            diskname=self.DB_DISK_NAME)
        logger.info(cfediskCmd)
        tmp = self.dbStandby1RootNode.sh(cfediskCmd).result()
        logger.info(tmp)
        # Let the fault take effect while replication is in progress.
        time.sleep(50)

        logger.info("-----------clean disk-----------")
        cfediskCmd = '''
            {cfe_path}/cfe "clean  rfile_full  where(diskname={diskname})"'''.format(cfe_path=self.CFE_PATH,
                                                                                     diskname=self.DB_DISK_NAME)
        logger.info(cfediskCmd)
        tmp = self.dbStandby1RootNode.sh(cfediskCmd).result()
        logger.info(tmp)

        logger.info("-----------restart opengauss-----------")
        restartCmd = '''
            source {source_path};
            gs_ctl restart -D {DN1_PATH} -N all'''.format(source_path=self.DB_ENV_PATH, DN1_PATH=self.DB_INSTANCE_PATH)
        logger.info(restartCmd)
        restartMsg = self.dbPrimaryUserNode.sh(restartCmd).result()
        self.assertTrue(restartMsg.find(self.RESTART_SUCCESS_MSG) > -1)
        # Give the cluster a moment to settle before querying status.
        time.sleep(10)

        logger.info("-----------check opengauss status-----------")
        checkCmd = '''
            source {source_path};
            gs_om -t status --detail'''.format(source_path=self.DB_ENV_PATH)
        logger.info(checkCmd)
        check_msg = self.dbPrimaryUserNode.sh(checkCmd).result()
        logger.info(check_msg)
        self.assertTrue(check_msg.find(self.PRIMARY_NORMAL) > -1)

        logger.info('--------------------recover file-------------------------')
        recover_msg = commonshpri.restoreFile(os.path.join(self.DB_INSTANCE_PATH, self.dump_file_name))
        logger.info(recover_msg)
        self.assertTrue(recover_msg.find(self.RESTORE_SUCCESS_MSG) > -1)

        # Verify the standby has finished synchronizing the data.
        logger.info('校验备机是否完成数据同步')
        flag = commonshsta2.check_data_consistency()
        self.assertTrue(flag)

        logger.info("-----------check data consistency-----------")
        # Compare the table list, then every table's row count,
        # between the primary and standby 2.
        nodes_tuple = (self.dbPrimaryUserNode, self.dbStandby2UserNode)
        flag = common.check_data_sample_by_all(r'\d', *nodes_tuple)
        self.assertTrue(flag)

        table_dict = common.format_sql_result(commonshpri.executDbSql(r'\d'))
        table_name = table_dict.get('Name')
        for name in table_name:
            select_sql = f'select count(*) from {name};'
            flag = common.check_data_sample_by_all(select_sql, *nodes_tuple)
            self.assertTrue(flag)

    def tearDown(self):
        """Remove the dump archive and clear any leftover disk-full fault."""
        logger.info('----------------this is tearDown-----------------------')
        logger.info('----------------delete dump file-----------------------')
        # BUGFIX: the command previously contained literal "(unknown)"
        # placeholders instead of {filename} format fields, so .format() was a
        # no-op and the dump file was never actually deleted.
        deleteCmd = '''
            rm -rf {filename};
            ls -al {filename}'''.format(filename=os.path.join(self.DB_INSTANCE_PATH, self.dump_file_name))
        logger.info(deleteCmd)
        tmp = self.dbPrimaryRootNode.sh(deleteCmd).result()
        logger.info(tmp)

        logger.info("-----------clean disk-----------")
        # Clean again defensively in case the test failed before its own clean.
        cfediskCmd = '''
            {cfe_path}/cfe "clean  rfile_full  where(diskname={diskname})"'''.format(cfe_path=self.CFE_PATH,
                                                                                     diskname=self.DB_DISK_NAME)
        logger.info(cfediskCmd)
        tmp = self.dbStandby1RootNode.sh(cfediskCmd).result()
        logger.info(tmp)
