"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.

openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:

          http://license.coscl.org.cn/MulanPSL2

THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : 故障&可靠性测试
Case Name   : 恢复过程中向有唯一约束的相同表中插入重复数据
Create At   : 2020/11/23
@zou_jialiang0505328126
Description :
    1.创唯一约束建表，并插入数据,创建索引
    2.使用gs_dump导出该表
    3. 使用gs_restore恢复该表
    4. 恢复的过程中对该表插入数据（部分数据重复）
    5.检查数据一致性
    6.执行tpcc
    7.检查数据一致性
Expect      :
    1.创建表并插入数据成功
    2.导出数据成功
    3.恢复成功
    4.插入失败，提示数据重复
    5.数据一致，插入数据成功
    6.执行tpcc成功
    7.数据一致
History     :
    modified: 2021/01/27  modified by @wan005,修改启动tpcc用户
    modified: 2022/08/30  modified by wx1115623,增加等待时间，等待数据恢复后再插入重复数据
"""

import os
import unittest
import time
from yat.test import macro
from yat.test import Node
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Common import Common
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from testcase.utils.ComThread import ComThread
from testcase.utils.CommonSH import RestartDbCluster


class Basebackup(unittest.TestCase):
    """恢复过程中向有唯一约束的相同表中插入重复数据.

    While a gs_restore of a dumped table is still running, insert rows that
    partially duplicate the restored data and verify the unique constraint
    rejects them; afterwards verify restore success, primary/standby data
    consistency, and that tpcc still runs cleanly.
    """

    # DB shells bound to each cluster node; created once for the test class.
    commonshpri = CommonSH('PrimaryDbUser')
    commonshsta = CommonSH('Standby1DbUser')
    commonshsta2 = CommonSH('Standby2DbUser')
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Create node handles, helper objects and test parameters."""
        self.log = Logger()
        self.log.info(f"-----{os.path.basename(__file__)} start-----")
        self.dbPrimaryDbUser = Node(node='PrimaryDbUser')
        self.dbStandby1DbUser = Node(node='Standby1DbUser')
        self.dbStandby2DbUser = Node(node='Standby2DbUser')
        self.db_primary_root_node = Node(node='PrimaryRoot')
        self.Constant = Constant()
        self.common = Common('PrimaryRoot')
        # Archive produced by gs_dump and consumed by gs_restore.
        self.restore_file = os.path.join(macro.DB_BACKUP_PATH, 'dump_backup',
                                         'tools_restore_case001.tar')
        self.sql_path = os.path.join(macro.DB_BACKUP_PATH, 'testscript')
        self.tblname = 'restore_case010'
        # Number of rows seeded into the table (kept as str for SQL splicing).
        self.values = '2000000'
        self.idxname = 'restore_case010_idx'

    def _check_standby_sync(self):
        """Assert that both standby nodes report data consistency."""
        self.log.info('-----检查主备是否同步-----')
        for standby_shell in (self.commonshsta, self.commonshsta2):
            self.assertTrue(standby_shell.check_data_consistency())

    def _check_tables_consistent(self, nodes):
        """Assert table list and per-table row counts match on all *nodes*."""
        flag = self.common.check_data_sample_by_all(r'\d', *nodes)
        self.assertTrue(flag)
        table_dict = self.common.format_sql_result(
            self.commonshpri.executDbSql(r'\d'))
        for name in table_dict.get('Name'):
            select_sql = f'select count(*) from {name};'
            flag = self.common.check_data_sample_by_all(select_sql, *nodes)
            self.assertTrue(flag)

    def test_basebackup(self):
        """Run the full dump → restore-with-duplicate-insert → tpcc scenario."""
        self.log.info('-----创建备份路径------')
        backup_dir = os.path.dirname(self.restore_file)
        shell_cmd = f'rm -rf {backup_dir}; mkdir {backup_dir}'
        self.log.info(shell_cmd)
        result = self.dbPrimaryDbUser.sh(shell_cmd).result()
        self.log.info(result)

        self.log.info('-----step1:创建表，插入数据,创建索引;'
                      'expect:创建表并插入数据成功-----')
        # NOTE: the original spliced this with a backslash continuation inside
        # an f-string, which leaked source indentation into the SQL text.
        sql = f'create table {self.tblname}(i int unique); ' \
              f'create index {self.idxname} on {self.tblname}(i); ' \
              f'insert into {self.tblname} (select * from ' \
              f'generate_series(1,{self.values}));'
        result = self.commonshpri.executDbSql(sql)
        self.log.info(result)
        self.assertIn(self.Constant.CREATE_INDEX_SUCCESS_MSG, result)

        self.log.info('-----step2:使用gs_dump导出该表;expect:导出数据成功-----')
        result = self.commonshpri.dumpFile(filename=self.restore_file,
                                           cmd=f'-F t -t {self.tblname}',
                                           get_detail=False)
        self.log.info(result)
        self.assertTrue(result)

        self.log.info('-----获取还原前数据库已有表-----')
        self.table_list = self.commonshpri.executDbSql(r'\d')
        self.log.info(self.table_list)

        self.log.info('-----step3:使用gs_restore恢复该表;expect:恢复成功-----')
        restore_thread = ComThread(self.commonshpri.restoreFile,
                                   args=(self.restore_file,))
        # Thread.setDaemon() is deprecated (Python 3.10+); set the attribute.
        restore_thread.daemon = True
        restore_thread.start()

        self.log.info('-----step4:恢复的过程中对该表插入数据（部分数据重复）;'
                      'expect:插入失败，提示数据重复-----')
        # Give the restore a head start so the insert overlaps with it.
        time.sleep(5)
        start_value = int(self.values) - 400
        end_value = int(self.values) * 2
        result = self.commonshpri.executDbSql(
            f'insert into {self.tblname} (select * from '
            f'generate_series({str(start_value)},{str(end_value)}));')
        self.log.info(result)
        self.assertIn('ERROR:  duplicate key value violates unique constraint',
                      result)

        self.log.info('-----检查恢复数据结果-----')
        restore_thread.join(60 * 10)
        # join() returns silently on timeout; fail loudly if restore hung.
        self.assertFalse(restore_thread.is_alive(), '恢复超时')
        result = restore_thread.get_result()
        self.assertIn(self.Constant.RESTORE_SUCCESS_MSG, result)

        self.log.info('-----对比恢复前后数据是否一致-----')
        time.sleep(5)
        self.table_list_after = self.commonshpri.executDbSql(r'\d')
        self.log.info(self.table_list_after)
        # The pre-restore table listing must be a subset of the new listing.
        self.assertIn(self.table_list, self.table_list_after)

        self._check_standby_sync()

        self.log.info('-----step5:检查数据一致性;expect:数据一致-----')
        nodes_tuple = (self.dbPrimaryDbUser, self.dbStandby1DbUser,
                       self.dbStandby2DbUser)
        self._check_tables_consistent(nodes_tuple)

        self.log.info('-----step6:执行tpcc;expect:执行tpcc成功-----')
        result = self.common.startTPCC(
            self.db_primary_root_node, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.Constant.TPCC_SUCCESS_MSG, result)

        self._check_standby_sync()

        self.log.info('-----step7:检查数据一致性;expect:数据一致-----')
        self._check_tables_consistent(nodes_tuple)

    def tearDown(self):
        """Remove backup files, script path, and the test table/index."""
        self.log.info('-----环境清理-----')
        self.log.info('-----删除备份文件-----')
        shell_cmd = f'rm -rf {os.path.dirname(self.restore_file)}'
        self.log.info(shell_cmd)
        result = self.dbPrimaryDbUser.sh(shell_cmd).result()
        self.log.info(result)

        self.log.info('-----删除执行脚本路径-----')
        shell_cmd = f'rm -rf {self.sql_path}'
        self.log.info(shell_cmd)
        result = self.dbPrimaryDbUser.sh(shell_cmd).result()
        self.log.info(result)

        self.log.info('-----删除表-----')
        result = self.commonshpri.executDbSql(
            f'drop index if exists {self.idxname};'
            f'drop table if exists {self.tblname};')
        self.log.info(result)
        self.assertIn(self.Constant.TABLE_DROP_SUCCESS, result, '删除表失败')
        self.assertIn(self.Constant.drop_index_success_msg, result, '删除索引失败')
        self.log.info(f"-----{os.path.basename(__file__)} end-----")
