'''

Case Type： 数据库系统
Case Name： 备节点数据磁盘不足，查看集群状态，在主节点进行数据插入，在主备上进行查询
Create At:  2020/07/25
@zou_jialiang0505328126
Description:  1.备节点1注入磁盘不足故障 2.查看集群状态 3.主节点上创建表并插入数据 4.备节点2执行查询指令，查看备节点2是否正常 5.主节点进行数据恢复 6.查询无故障备机2与主机数据是否一致

history：
created： 2020/07/25  created by @wan005
'''
import os
import unittest
from yat.test import Node
import time
import _thread
import queue
from yat.test import macro
import sys

sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.Common import Common
from testcase.utils.CommonSH import *

# Module-level helpers shared by every test method in this file.
logger = Logger()
common = Common()
# NOTE: deliberately shadows the imported Constant class with an instance;
# the rest of the file uses it as a namespace of expected message strings.
Constant = Constant()
# Shell helpers bound to the db user of each cluster node.
commonshpri = CommonSH('PrimaryDbUser')
commonshsta1 = CommonSH('Standby1DbUser')
commonshsta2 = CommonSH('Standby2DbUser')


class Disk_full(unittest.TestCase):
    """Opengauss_Reliability_Dbsys_Case136: standby data-disk-full fault.

    Scenario:
      1. Inject a disk-full fault on standby node 1 (via the cfe tool).
      2. Check the cluster status (expected: Degraded; standby 1 reports
         the disk-full message).
      3. Create a table on the primary and insert 10,000,000 rows.
      4. Query on standby node 2 and verify it is consistent with the primary.
      5. Restore the setUp dump on the primary.
      6. Clean the fault, restart the cluster and verify standby 1 catches
         up and is consistent again.
    """

    # Node handles (db os-user and root) for the primary and both standbys.
    dbPrimaryUserNode = Node(node='PrimaryDbUser')
    dbPrimaryRootNode = Node(node='PrimaryRoot')
    dbStandby1UserNode = Node(node='Standby1DbUser')
    dbStandby1RootNode = Node(node='Standby1Root')
    dbStandby2UserNode = Node(node='Standby2DbUser')
    dbStandby2RootNode = Node(node='Standby2Root')
    # Paths and expected messages taken from the shared macros/constants.
    DB_INSTANCE_PATH = macro.DB_INSTANCE_PATH
    DB_ENV_PATH = macro.DB_ENV_PATH
    STANDBY_NORMAL = Constant.STANDBY_NORMAL
    DB_POSTMASTER_PID_NAME = macro.DB_POSTMASTER_PID_NAME
    CREATE_TABLE_SUCCESS = Constant.CREATE_TABLE_SUCCESS
    DROP_TABLE_SUCCESS = Constant.DROP_TABLE_SUCCESS
    # gs_dump archive created in setUp, restored in the test, deleted in tearDown.
    dump_file_name = 'test_pid.tar'
    # Install location of the cfe fault-injection tool on the standby hosts.
    CFE_PATH = macro.CFE_PATH
    # Device backing DB_INSTANCE_PATH; filled in by test_disk_full.
    DB_DISK_NAME = ''
    DISK_FULL_MSG = Constant.DISK_FULL_MSG
    RESTORE_SUCCESS_MSG = Constant.RESTORE_SUCCESS_MSG
    START_SUCCESS_MSG = Constant.START_SUCCESS_MSG
    CLUSTER_DEGRADED_MSG = Constant.CLUSTER_DEGRADED_MSG
    CLUSTER_NORMAL_MSG = Constant.CLUSTER_NORMAL_MSG
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Restart the cluster, then dump the primary database for later restore."""
        logger.info("-----------this is setup-----------")
        logger.info('------------Opengauss_Reliability_Dbsys_Case136.py start------------')
        logger.info('--------------------dump file-------------------------')
        dumpCmd = '''
            source {source_path};
            gs_dump {dbname} -p {port} -f {file_name} -F t'''.format(
            source_path=self.DB_ENV_PATH,
            dbname=self.dbPrimaryUserNode.db_name,
            port=self.dbPrimaryUserNode.db_port,
            file_name=os.path.join(self.DB_INSTANCE_PATH, self.dump_file_name))
        logger.info(dumpCmd)
        dumpMsg = self.dbPrimaryUserNode.sh(dumpCmd).result()
        logger.info(dumpMsg)
        # gs_dump reports success with this exact phrase.
        flag = 'dump database ' + self.dbPrimaryUserNode.db_name + ' successfully'
        self.assertIn(flag, dumpMsg)

    def test_disk_full(self):
        """Run the standby-1 disk-full fault scenario end to end."""
        logger.info("-----------get standby disk name -----------")
        cmd = f"df -h {macro.DB_INSTANCE_PATH}"
        logger.info(cmd)
        msg = self.dbStandby1RootNode.sh(cmd).result()
        logger.info(msg)
        # Last df output line, first column: the device backing the data dir.
        self.DB_DISK_NAME = msg.splitlines()[-1].split()[0].strip()

        logger.info("-----------standby1 disk full -----------")
        cfediskCmd = '''
            {cfe_path}/cfe  "inject  rfile_full (diskname)  values  ({diskname})"'''.format(
            cfe_path=self.CFE_PATH, diskname=self.DB_DISK_NAME)
        logger.info(cfediskCmd)
        tmp = self.dbStandby1RootNode.sh(cfediskCmd).result()
        logger.info(tmp)
        # Give the injected fault a moment to take effect.
        time.sleep(5)

        logger.info("-----------standby1 disk space -----------")
        cmd = f"df -h {macro.DB_INSTANCE_PATH}"
        logger.info(cmd)
        msg = self.dbStandby1RootNode.sh(cmd).result()
        logger.info(msg)

        logger.info("-----------Primary check opengauss status-----------")
        checkCmd = '''
            source {source_path};
            gs_om -t status --detail'''.format(source_path=self.DB_ENV_PATH)
        checktMsg = self.dbPrimaryUserNode.sh(checkCmd).result()
        logger.info(checktMsg)

        logger.info("----------- insert data ,standbyhost1 stop-----------")
        SqlMdg = commonshpri.executDbSql(
            'drop table if exists test_136;create table test_136(id int,name varchar(20));insert into test_136 values(generate_series(1,10000000),\'test - \'|| generate_series(1,10000000));')
        logger.info(SqlMdg)

        logger.info("-----------standby1 disk space -----------")
        cmd = f"df -h {macro.DB_INSTANCE_PATH}"
        logger.info(cmd)
        msg = self.dbStandby1RootNode.sh(cmd).result()
        logger.info(msg)

        logger.info("-----------Primary recheck opengauss status-----------")
        checktMsg = self.dbPrimaryUserNode.sh(checkCmd).result()
        logger.info(checktMsg)
        # The cluster should be Degraded while standby 1's disk is full.
        self.assertIn(self.CLUSTER_DEGRADED_MSG, checktMsg)

        logger.info("-----------standbyhost1 recheck opengauss status-----------")
        # Same status command, run from standby 1 this time.
        checktMsg = self.dbStandby1UserNode.sh(checkCmd).result()
        logger.info(checktMsg)
        self.assertIn(self.DISK_FULL_MSG, checktMsg)

        # Verify standby 2 has finished data synchronization.
        logger.info('校验备机是否完成数据同步')
        flag = commonshsta2.check_data_consistency()
        self.assertTrue(flag)

        logger.info('--------------------standbyhost2 check data consistency--------------------------')
        SqlMdg = commonshsta2.executDbSql('select count(*) from test_136;')
        logger.info(SqlMdg)
        self.assertIn('10000000', SqlMdg)

        logger.info("-----------check data consistency-----------")
        nodes_tuple = (self.dbPrimaryUserNode, self.dbStandby2UserNode)
        flag = common.check_data_sample_by_all(r'\d', *nodes_tuple)
        self.assertTrue(flag)

        # Compare the row count of every table between primary and standby 2.
        table_dict = common.format_sql_result(commonshpri.executDbSql(r'\d'))
        table_name = table_dict.get('Name')
        for name in table_name:
            select_sql = f'select count(*) from {name};'
            flag = common.check_data_sample_by_all(select_sql, *nodes_tuple)
            self.assertTrue(flag)

        logger.info('--------------------recover file-------------------------')
        dumpCmd = '''
            source {source_path};
            gs_restore -U '{username}' -W '{password}'  -p {port} -d {dbname}  '{file_name}' -c'''.format(
            source_path=self.DB_ENV_PATH, username=self.dbPrimaryUserNode.db_user,
            password=self.dbPrimaryUserNode.db_password, port=self.dbPrimaryUserNode.db_port,
            dbname=self.dbPrimaryUserNode.db_name, file_name=os.path.join(self.DB_INSTANCE_PATH, self.dump_file_name))
        # NOTE(review): this logs the db password in clear text; consider masking.
        logger.info(dumpCmd)
        dumpMsg = self.dbPrimaryUserNode.sh(dumpCmd).result()
        logger.info(dumpMsg)
        self.assertIn(self.RESTORE_SUCCESS_MSG, dumpMsg)

        # Verify standby 2 has finished data synchronization after the restore.
        logger.info('校验备机是否完成数据同步')
        flag = commonshsta2.check_data_consistency()
        self.assertTrue(flag)

        logger.info("-----------check data consistency-----------")
        nodes_tuple = (self.dbPrimaryUserNode, self.dbStandby2UserNode)
        flag = common.check_data_sample_by_all(r'\d', *nodes_tuple)
        self.assertTrue(flag)

        table_dict = common.format_sql_result(commonshpri.executDbSql(r'\d'))
        table_name = table_dict.get('Name')
        for name in table_name:
            select_sql = f'select count(*) from {name};'
            flag = common.check_data_sample_by_all(select_sql, *nodes_tuple)
            self.assertTrue(flag)

        logger.info("-----------clean Standby1 disk-----------")
        cfediskCmd = '''
            {cfe_path}/cfe "clean  rfile_full  where(diskname={diskname})"'''.format(
            cfe_path=self.CFE_PATH, diskname=self.DB_DISK_NAME)
        logger.info(cfediskCmd)
        tmp = self.dbStandby1RootNode.sh(cfediskCmd).result()
        logger.info(tmp)

        logger.info("-----------restart cluster-----------")
        restartCmd = '''
            source {source_path};
            gs_om -t stop;gs_om -t start;'''.format(source_path=self.DB_ENV_PATH)
        restartMsg = self.dbStandby1UserNode.sh(restartCmd).result()
        logger.info(restartMsg)
        time.sleep(1)
        logger.info("-----------check opengauss status till normal-----------")
        # Poll the cluster status up to 10 times while standby 1 catches up.
        self.statusMsg = commonshpri.getDbClusterStatus('detail')
        logger.info(self.statusMsg)
        for i in range(10):
            if 'Catchup' in self.statusMsg or 'Standby Need repair' in self.statusMsg:
                time.sleep(10)
                self.statusMsg = commonshpri.getDbClusterStatus('detail')
                logger.info(self.statusMsg)
            else:
                break
            time.sleep(1)

        # Verify standby 1 has finished data synchronization.
        logger.info('校验备机是否完成数据同步')
        flag = commonshsta1.check_data_consistency()
        self.assertTrue(flag)

        if self.CLUSTER_NORMAL_MSG in self.statusMsg:
            logger.info('--------------------standbyhost1 check data consistency--------------------------')
            SqlMdg = commonshsta1.executDbSql('select count(*) from test_136;')
            logger.info(SqlMdg)
            self.assertIn('10000000', SqlMdg)
        else:
            logger.error("-----------Standby1 can't be normal-----------")

    def tearDown(self):
        """Clean the fault, rebuild standby 1 if needed, remove the dump, drop the table."""
        logger.info('----------------this is tearDown-----------------------')

        logger.info("-----------clean Standby1 disk-----------")
        cfediskCmd = '''
            {cfe_path}/cfe "clean  rfile_full  where(diskname={diskname})"'''.format(
            cfe_path=self.CFE_PATH, diskname=self.DB_DISK_NAME)
        logger.info(cfediskCmd)
        tmp = self.dbStandby1RootNode.sh(cfediskCmd).result()
        logger.info(tmp)

        self.statusMsg = commonshpri.getDbClusterStatus('detail')
        logger.info(self.statusMsg)
        # If standby 1 is still abnormal, rebuild it from a full build.
        if 'Catchup' in self.statusMsg or 'Standby Need repair' in self.statusMsg:
            logger.info("-----------build Standby1-----------")
            buildMsg = commonshsta1.executeGsctl('build', Constant.REBUILD_SUCCESS_MSG, '-b full')
            logger.info(buildMsg)
            if buildMsg:
                logger.info("build success")
            else:
                logger.error("build failed")

        logger.info('----------------delete dump file-----------------------')
        # BUGFIX: the command previously contained literal '(unknown)' text in
        # place of the '{filename}' placeholders, so .format() substituted
        # nothing and the dump file was never actually deleted.
        deleteCmd = '''
            rm -rf {filename};
            ls -al {filename}'''.format(filename=os.path.join(self.DB_INSTANCE_PATH, self.dump_file_name))
        logger.info(deleteCmd)
        tmp = self.dbPrimaryRootNode.sh(deleteCmd).result()
        logger.info(tmp)

        logger.info('--------------------drop table-------------------------')
        SqlMdg = commonshpri.executDbSql('drop table test_136;')
        logger.info(SqlMdg)
        self.assertIn(Constant.DROP_TABLE_SUCCESS, SqlMdg)