"""
Case Type   : 故障&可靠性测试
Case Name   : 删除数据的过程中恢复不同数据库
Create At   : 2020/11/23
Owner       : @zou_jialiang0505328126
Description :
    1.创建数据库tpcc，在数据库tpcc中创建表
    2.删除数据
    3.删除数据过程中恢复postgres数据库
    4.检查数据一致性
    5.执行TPCC
    6.检查数据一致性
Expect      :
    1.创建表成功
    2.删除数据成功
    3.恢复成功
    4.数据一致
    5.执行tpcc成功
    6.数据一致
History     :
    modified: 2020/03/01  modified by @peilinqian,修改创建数据库SQL；以防已存在库导致断言失败
    modified: 2020/12/11  modified by @wan005,修改恢复数据库为新创建数据库，防止恢复失败导致后续用例失败
    modified: 2021/01/08  modified by @wan005,物化视图不支持compression等参数，修改断言适配1.1.0版本
    modified: 2021/07/05  modified by @wan0050,适当减少数据量，测试执行前增加主备同步检查
    modified: 2021/07/21  modified by @wan005,修改recovery_max_workers=8，
    并增加同步等待时长，规避环境性能差问题
"""

import os
import unittest
import time
import sys
from yat.test import macro
from yat.test import Node

sys.path.append(sys.path[0] + "/../")
from testcase.utils.CommonSH import *
from testcase.utils.Common import Common
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from testcase.utils.ComThread import ComThread

# Shell helper bound to the primary node. Created at import time because the
# class-level @unittest.skipIf decorator below queries the cluster node count.
Primary_SH = CommonSH('PrimaryDbUser')


@unittest.skipIf(1 == Primary_SH.get_node_num(), '单机环境不执行')
class Basebackup(unittest.TestCase):
    """Restore a database while a concurrent DELETE runs on it, then verify
    primary/standby data consistency and that TPCC still executes cleanly.

    Flow: setUp prepares a dump file, preset data and a fresh target
    database; the test starts a background DELETE thread, restores the dump
    into the target database while the DELETE runs, and checks consistency
    across all three nodes before and after a TPCC run; tearDown removes
    everything and restores the original recovery_max_workers value.
    """

    # Shells bound to the two standby nodes, used for sync/consistency checks.
    commonshsta = CommonSH('Standby1DbUser')
    commonshsta2 = CommonSH('Standby2DbUser')
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Prepare the backup file, preset data and the target database;
        raise recovery_max_workers to 8 and restart the cluster so the
        setting takes effect (see 2021/07/21 history entry).
        """
        self.log = Logger()
        self.log.info('Opengauss_Reliability_Tools_Restore_Case015.py 开始执行')
        self.dbPrimaryDbUser = Node(node='PrimaryDbUser')
        self.dbStandby1DbUser = Node(node='Standby1DbUser')
        self.dbStandby2DbUser = Node(node='Standby2DbUser')
        self.dbPrimaryRootNode = Node(node='PrimaryRoot')
        self.Constant = Constant()
        self.common = Common('PrimaryRoot')
        self.restore_file = os.path.join(macro.DB_BACKUP_PATH, 'dump_backup', 'tools_restore_case001.tar')
        self.sql_path = os.path.join(macro.DB_BACKUP_PATH, 'testscript')
        self.tblname = 'restore_case015'
        self.values = '150000'

        # Remember the current recovery_max_workers so tearDown can restore it.
        result = Primary_SH.executDbSql('show recovery_max_workers;')
        self.log.info(f"recovery_max_workers is {result}")
        # gsql tabular output: the value sits on the second-to-last line
        # (last line is the "(1 row)" footer) — TODO confirm against CommonSH.
        self.recovery_max_workers = result.strip().splitlines()[-2]
        result = Primary_SH.executeGsguc(
            "set", self.Constant.GSGUC_SUCCESS_MSG, "recovery_max_workers=8")
        self.assertTrue(result)
        # Restart the cluster so the new GUC value takes effect.
        result = Primary_SH.stopDbCluster()
        self.assertTrue(result)
        result = Primary_SH.startDbCluster()
        self.assertTrue(result)

        status = Primary_SH.getDbClusterStatus()
        self.log.info(status)
        self.assertTrue("Degraded" in status or "Normal" in status)

        self.log.info('创建备份路径')
        shell_cmd = f'rm -rf {os.path.dirname(self.restore_file)}; mkdir {os.path.dirname(self.restore_file)}'
        self.log.info(shell_cmd)
        result = self.dbPrimaryDbUser.sh(shell_cmd).result()
        self.log.info(result)

        self.log.info('生成预设数据')
        result = self.common.scp_file(self.dbPrimaryDbUser, 'restore_data.sql', self.sql_path)
        self.log.info(result)
        sql_file_cmd = f'''
                                source {macro.DB_ENV_PATH};    
                                gsql -d {self.dbPrimaryDbUser.db_name} -p {self.dbPrimaryDbUser.db_port}  -f {os.path.join(
            self.sql_path, 'restore_data.sql')}
                                '''
        self.log.info(sql_file_cmd)
        sql_bx_msg = self.dbPrimaryDbUser.sh(sql_file_cmd).result()
        self.log.info(sql_bx_msg)
        self.assertNotIn('ERROR', sql_bx_msg)

        self.log.info('导出数据')
        result = Primary_SH.dumpFile(filename=self.restore_file,
                                     get_detail=False)
        self.log.info(result)
        self.assertTrue(result)

        # A dedicated database is restored into (instead of postgres) so a
        # failed restore cannot break later cases — see 2020/12/11 history.
        self.dbname = 'dbsys_restore_db'
        self.log.info("创建数据库")
        create_db_sql = f'drop database if exists {self.dbname};' \
                        f'create database {self.dbname};'
        result = Primary_SH.executDbSql(create_db_sql)
        self.log.info(result)
        self.assertIn(self.Constant.CREATE_DATABASE_SUCCESS_MSG, result)

    def _check_standby_sync(self, retry=True):
        """Assert both standbys are consistent with the primary.

        :param retry: when True, re-check once before failing to tolerate
            transient replication lag on slow environments.
        """
        for standby in (self.commonshsta, self.commonshsta2):
            result = standby.check_data_consistency()
            if not result and retry:
                result = standby.check_data_consistency()
            self.assertTrue(result)

    def _check_all_nodes_consistency(self, nodes_tuple):
        """Assert the table list and every table's row count are identical
        across all nodes in *nodes_tuple*.
        """
        flag = self.common.check_data_sample_by_all(r'\d', *nodes_tuple)
        self.assertTrue(flag)

        table_dict = self.common.format_sql_result(
            Primary_SH.executDbSql(r'\d'))
        for name in table_dict.get('Name'):
            select_sql = f'select count(*) from {name};'
            flag = self.common.check_data_sample_by_all(select_sql, *nodes_tuple)
            self.assertTrue(flag)

    def test_basebackup(self):
        """Restore the dump while a DELETE runs, then verify consistency
        before and after a TPCC run.
        """
        self.log.info('创建表插入数据(postgres)')
        sql = f'''create table {self.tblname}(i int)\
                partition by range (i)\
                (partition p1 values less than (50000),\
                partition p2 values less than (100000),\
                partition p3 values less than (150001)\
                ); insert into {self.tblname} \
                (select * from generate_series(1,{self.values}));'''
        result = Primary_SH.executDbSql(f'{sql}', dbname=self.dbname)
        self.log.info(result)
        self.assertIn('CREATE', result)

        self.log.info('获取还原前数据库已有表')
        self.table_list = Primary_SH.executDbSql(r'\d')
        self.log.info(self.table_list)

        self.log.info('测试前检查主备是否同步')
        self._check_standby_sync(retry=False)

        self.log.info('启动线程删除表(tpcc)')
        delete_thread = ComThread(Primary_SH.executDbSql,
                                  args=(f'delete from {self.tblname} '
                                        f'where i>100000;', self.dbname))
        # daemon attribute replaces the deprecated Thread.setDaemon().
        delete_thread.daemon = True
        delete_thread.start()

        # Restore runs while the DELETE thread is still working — this
        # overlap is the scenario under test.
        self.log.info('恢复数据')
        result = Primary_SH.restoreFile(self.restore_file,
                                        get_detail=True,
                                        dbname=self.dbname)
        self.log.info(result)
        self.assertIn(self.Constant.RESTORE_SUCCESS_MSG, result)

        self.log.info('检查删除结果')
        # Generous timeout: the DELETE competes with the restore for I/O.
        delete_thread.join(60 * 60)
        result = delete_thread.get_result()
        self.log.info(result)
        self.assertIn('DELETE', result)

        self.log.info('对比恢复前后数据是否一致')
        time.sleep(5)
        self.table_list_after = Primary_SH.executDbSql(r'\d')
        self.log.info(self.table_list_after)
        # The restore may add tables; the pre-restore listing must still be
        # contained verbatim in the post-restore listing.
        self.assertIn(self.table_list, self.table_list_after)

        self.log.info('检查主备是否同步')
        self._check_standby_sync()

        self.log.info('检查数据一致性')
        nodes_tuple = (self.dbPrimaryDbUser, self.dbStandby1DbUser, self.dbStandby2DbUser)
        self._check_all_nodes_consistency(nodes_tuple)

        self.log.info('启动TPCC')
        result = self.common.startTPCC(self.dbPrimaryRootNode, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.Constant.TPCC_SUCCESS_MSG, result)

        self.log.info('检查主备是否同步')
        self._check_standby_sync()

        self.log.info('检查数据一致性')
        self._check_all_nodes_consistency(nodes_tuple)

    def tearDown(self):
        """Remove backup files, preset data, the test table and database,
        then restore recovery_max_workers and restart the cluster.
        """
        self.log.info('环境清理')
        self.log.info('删除备份文件')
        shell_cmd = f'rm -rf {os.path.dirname(self.restore_file)}'
        self.log.info(shell_cmd)
        result = self.dbPrimaryDbUser.sh(shell_cmd).result()
        self.log.info(result)

        self.log.info('删除预设数据')
        result = self.common.scp_file(self.dbPrimaryDbUser, 'clear_restore_data.sql', self.sql_path)
        self.log.info(result)
        sql_file_cmd = f'''
                                       source {macro.DB_ENV_PATH};
                                       gsql -d {self.dbPrimaryDbUser.db_name} -p {self.dbPrimaryDbUser.db_port}  -f {os.path.join(
            self.sql_path, 'clear_restore_data.sql')}
        '''
        self.log.info(sql_file_cmd)
        sql_bx_msg = self.dbPrimaryDbUser.sh(sql_file_cmd).result()
        self.log.info(sql_bx_msg)

        self.log.info('删除执行脚本路径')
        shell_cmd = f'rm -rf {self.sql_path}'
        self.log.info(shell_cmd)
        result = self.dbPrimaryDbUser.sh(shell_cmd).result()
        self.log.info(result)

        self.log.info('删除表')
        result = Primary_SH.executDbSql(
            f'drop table if exists {self.tblname};')
        self.log.info(result)

        self.log.info("drop database")
        # "if exists" keeps cleanup idempotent, matching setUp's create SQL.
        result = Primary_SH.executDbSql(f'drop database if exists {self.dbname};')
        self.log.info(result)
        self.log.info("-------------恢复参数---------------")
        # Restore the value captured in setUp; a restart is required again
        # for the GUC to take effect.
        Primary_SH.executeGsguc(
            "set", self.Constant.GSGUC_SUCCESS_MSG,
            f"recovery_max_workers={self.recovery_max_workers}")
        Primary_SH.stopDbCluster()
        Primary_SH.startDbCluster()

        status = Primary_SH.getDbClusterStatus()
        self.assertTrue("Degraded" in status or "Normal" in status)
        self.log.info('Opengauss_Reliability_Tools_Restore_Case015.py 执行结束')
