"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.

openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:

          http://license.coscl.org.cn/MulanPSL2

THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : guc-query-unique_sql自动淘汰
Case Name   : 自动淘汰hash表条目满时注入磁盘满故障，恢复后触发自动淘汰
Create At   : 2021/11/19
Owner       : @wan005
Description :
    1、修改参数重启使其生效，并校验其预期结果；
    2、清空记录，执行100 unique_sql，查看hash table记录条数
    3、注入故障，数据库状态异常
    4、恢复故障
    5、执行100+1 unique sql 查看记录条数，观察自动淘汰表现
    6、恢复默认值；
Expect      :
    1、参数修改成功，校验修改后系统参数
    2、执行成功 查看hash table记录条数为100
    3、注入故障成功 集群状态异常
    4、恢复故障成功 重启恢复 集群状态恢复
    5. hash表因重启被清空，重新执行达到100 再触发自动淘汰 查询记录条数为91
    6、恢复默认值成功
History     :
    modified: 2022/10/11 modified by wx1115623,修改sql语句，确保unique_sql查询结果达到100
"""
import os
import unittest

from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.CommonSH import RestartDbCluster
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node, macro


class GucTestCase(unittest.TestCase):
    """Opengauss_Reliability_Hardware_Case157.

    Verifies that the unique_sql hash table still triggers automatic
    eviction (enable_auto_clean_unique_sql) after the cluster recovers
    from an injected disk-full fault.  The step-by-step expectations are
    described (in Chinese) in the module docstring above.
    """

    # Node aliases for the primary and the two standbys; also consumed by
    # the RestartDbCluster decorator to bring the cluster up before setUp.
    nodes = ("PrimaryDbUser", "Standby1DbUser", "Standby2DbUser")

    @RestartDbCluster(*nodes)
    def setUp(self):
        """Build helpers, locate the instance disk, back up postgresql.conf."""
        self.log = Logger()
        self.primary_sh = CommonSH(self.nodes[0])
        self.primary_root_node = Node(node="PrimaryRoot")
        self.constant = Constant()
        self.user_node = Node(node="PrimaryDbUser")
        self.com = Common()
        self.log.info("setUp-实例化:ok")

        self.log.info("Opengauss_Reliability_Hardware_Case157:开始执行")

        self.log.info("获取磁盘名称;expect:成功")
        # Resolve the block device hosting the DB instance path: the last
        # line of `df -h` output carries the device name in column one.
        cmd = f"df -h {macro.DB_INSTANCE_PATH}"
        self.log.info(cmd)
        msg = self.primary_root_node.sh(cmd).result()
        self.log.info(msg)
        self.disk_name = msg.splitlines()[-1].split()[0].strip()
        # CFE fault-injection / cleanup fragments for a disk-full fault on
        # that device (consumed by Common.cfe_inject / Common.cfe_clean —
        # exact grammar is defined by the CFE tool, not this file).
        self.inject_cmd = f"rfile_full (diskname) values ({self.disk_name})"
        self.clean_command = f"rfile_full where diskname={self.disk_name}"

        text = "备份postgresql.conf文件"
        self.log.info(text)
        # Back up postgresql.conf so tearDown can restore the original GUCs.
        self.file = os.path.join(macro.DB_INSTANCE_PATH,
                                 macro.DB_PG_CONFIG_NAME)
        result = self.user_node.sh(f"cp {self.file} "
                                   f"{self.file}backup").result()
        self.log.info(result)

    def test_main(self):
        """Run the fault-injection scenario (steps 1-5 of the case)."""
        # step1: enable resource tracking + auto-clean and cap the
        # unique_sql hash table at 100 entries, then restart to apply.
        text = "--step1:设置参数并重启生效;expect:成功"
        self.log.info(text)
        param_list = ["enable_auto_clean_unique_sql=on",
                      "enable_resource_track=on", "use_workload_manager=on",
                      "instr_unique_sql_count=100"]
        for param in param_list:
            re = self.primary_sh.executeGsguc("set",
                                              self.constant.GSGUC_SUCCESS_MSG,
                                              param)
            self.log.info(re)
            self.assertTrue(re, "执行失败:" + text)

        result = self.primary_sh.stopDbCluster()
        self.assertTrue(result)
        result = self.primary_sh.startDbCluster(get_detail=True)
        self.log.info(result)

        # step2: reset unique_sql, then generate enough distinct statements
        # to fill the hash table exactly to its 100-entry limit.
        text = "--step2:清空记录，执行100 unique_sql，查看hash table记录条数; expect:100"
        self.log.info(text)
        result = self.primary_sh.executDbSql(
            "select reset_unique_sql('global','all',100);"
            "select count(*) from dbe_perf.statement;")
        self.log.info(result)
        self.assertIn("t\n", result, "执行失败:" + text)
        self.assertIn("1\n", result, "执行失败:" + text)
        # 48 iterations of drop/create/insert/drop on distinct table names
        # produce the distinct unique_sql entries needed to reach 100
        # (count calibrated by the 2022/10/11 history note).
        for j in range(48):
            tablename = f"unique_table_{j}"
            result = self.primary_sh.executDbSql(
                f"drop table if exists {tablename};"
                f"create table {tablename}(a int, b int);"
                f"insert into {tablename} values(1,2);"
                f"drop table if exists {tablename};")
            self.log.info(result)
            self.assertNotIn("ERROR", result, "执行失败:" + text)
        result = self.primary_sh.executDbSql("select count(*) "
                                             "from dbe_perf.statement;")
        self.log.info(result)
        self.assertIn("100\n", result, "执行失败:" + text)

        try:
            # step3: inject the disk-full fault; the cluster is expected to
            # go abnormal, so cleanup is guaranteed by the finally block.
            text = "--step3:注入磁盘满故障; expect:成功"
            self.log.info(text)
            result = self.com.cfe_inject(self.primary_root_node,
                                         self.inject_cmd)
            self.log.info(result)
            self.assertIn("successful execution", result, "执行失败:" + text)

        except Exception as error:
            # Injection may raise while the disk is full; log and fall
            # through to cleanup rather than aborting the case here.
            self.log.info("try异常信息:" + str(error))

        finally:
            # step4: always clear the fault and restart so the cluster can
            # recover even if step3 failed mid-way.
            text = "--step4:清除故障;expect:成功"
            self.log.info(text)
            result = self.com.cfe_clean(self.primary_root_node,
                                        self.clean_command)
            self.log.info(result)
            self.assertIn("successful execution", result, "执行失败:" + text)
            result = self.primary_sh.stopDbCluster()
            self.log.info(result)
            result = self.primary_sh.startDbCluster()
            self.log.info(result)
            self.log.info("故障已清理")

        db_status = self.primary_sh.getDbClusterStatus("detail")
        self.log.info(db_status)
        self.assertIn("cluster_state   : Normal", db_status, "执行失败" + text)

        # step5: the restart cleared the in-memory hash table; refill it to
        # the limit and push one extra entry to trigger auto-eviction.
        text = "--step5:故障清理后，再次触发自动淘汰;expect:成功"
        self.log.info(text)
        result = self.primary_sh.executDbSql("select count(*) "
                                             "from dbe_perf.statement;")
        self.log.info(result)
        self.assertNotIn("ERROR", result, "执行失败:" + text)
        # Second-to-last line of the psql output holds the count value.
        num = result.splitlines()[-2].strip()
        self.assertGreater(100, int(num))

        for j in range(46):
            tablename = f"unique_table_{j}"
            result = self.primary_sh.executDbSql(
                f"drop table if exists {tablename};"
                f"create table {tablename}(a int, b int);"
                f"drop table if exists {tablename};")
            self.log.info(result)
            self.assertNotIn("ERROR", result, "执行失败:" + text)

        result = self.primary_sh.executDbSql("select count(*) "
                                             "from dbe_perf.statement;")
        self.log.info(result)
        # If still short of 100, top up with distinct catalog queries until
        # the table is exactly full (idiomatic `not in` per PEP 8 / E713).
        if "100\n" not in result:
            num = result.splitlines()[-2].strip()
            table_list = ["pg_extension", "pg_index", "pg_inherits",
                          "pg_class", "pg_proc", "pg_cast", "pg_object"]
            for i in range(100 - int(num)):
                sql = f"select count(*) from {table_list[i]}"
                res = self.primary_sh.executDbSql(sql)
                self.log.info(res)
                self.assertNotIn("ERROR", res)

            result = self.primary_sh.executDbSql("select count(*) "
                                                 "from dbe_perf.statement;")
            self.log.info(result)
            self.assertIn("100\n", result, "执行失败:" + text)

        # This query is the 101st unique_sql: it overflows the table and
        # triggers auto-eviction, which is expected to leave 91 entries.
        result = self.primary_sh.executDbSql(
            "select count(*) from dbe_perf.statement limit 1;")
        self.log.info(result)
        self.assertIn("91\n", result, "执行失败:" + text)

        # Sanity workload: TPCC must still run after recovery.
        text = "运行TPCC;expect:成功"
        self.log.info(text)
        result = self.com.startTPCC(self.primary_root_node, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.constant.TPCC_SUCCESS_MSG, result, "执行失败" + text)

        text = "检查主备是否同步;expect:成功"
        self.log.info(text)
        res = self.primary_sh.check_data_consistency()
        self.assertTrue(res, "执行失败" + text)

        # Cross-node consistency: every table's row count must match on the
        # primary and both standbys.
        text = "检查数据一致性;expect:成功"
        self.log.info(text)
        nodes = (Node(self.nodes[0]), Node(self.nodes[1]),
                 Node(self.nodes[2]))
        flag = self.com.check_data_sample_by_all(r'\d', *nodes)
        self.assertTrue(flag, "执行失败" + text)
        table_dict = self.com.format_sql_result(
            self.primary_sh.executDbSql(r'\d'))
        table_name = table_dict.get('Name')
        for name in table_name:
            select_sql = f"select count(*) from {name};"
            flag = self.com.check_data_sample_by_all(select_sql, *nodes)
            self.assertTrue(flag, "执行失败" + text)

    def tearDown(self):
        """Step 6: clear any leftover fault, restore config, restart cluster."""
        text = "--step6:环境清理;expect:成功"
        self.log.info(text)
        # Best-effort fault cleanup in case test_main aborted before its
        # own finally block ran; result is logged, not asserted.
        clean_result = self.com.cfe_clean(self.primary_root_node,
                                          self.clean_command)
        self.log.info(clean_result)

        self.log.info("恢复postgresql.conf文件")
        mv_result = self.user_node.sh(f"mv {self.file}backup "
                                      f"{self.file}").result()
        self.log.info(mv_result)

        # Restart so the restored (default) GUC values take effect.
        result = self.primary_sh.stopDbCluster()
        self.log.info(result)
        result = self.primary_sh.startDbCluster(get_detail=True)
        self.log.info(result)

        db_status = self.primary_sh.getDbClusterStatus("detail")
        self.log.info(db_status)

        # A trivial command on a standby checks the node is still reachable
        # and has not exhausted file descriptors after the fault.
        result = Node(self.nodes[1]).sh("date").result()
        self.assertNotIn("Too many open files", result, "执行失败" + text)
        self.assertEqual("", mv_result, "执行失败:" + text)
        self.assertIn("cluster_state   : Normal", db_status, "执行失败" + text)
        self.log.info("Opengauss_Reliability_Hardware_Case157:执行完成")
