"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.

openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:

          http://license.coscl.org.cn/MulanPSL2

THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : guc-query-unique_sql自动淘汰
Case Name   : kill -19 后执行unique 执行reset kill -18 查看自动淘汰hash table记录条数
Create At   : 2021/11/19
Owner       : @wan005
Description :
    1、修改enable_auto_clean_unique_sql为on，重启使其生效，并校验其预期结果；
    2、清空记录，执行100 unique_sql，查看hash table记录条数
    3、执行kill -19挂住主节点
    4、2线程同时执行reset和unique_sql
    5、执行kill -18解挂主节点 观察自动淘汰情况
    6、环境清理
Expect      :
    1、参数修改成功，校验修改后系统参数
    2、执行成功 查看hash table记录条数为100
    3、挂住主节点成功
    4、启线程成功
    5、解挂主节点成功 查询记录条数为1或2
    6、环境清理成功
History     :
    modified: 2022/08/30 modified by wx1115623,注入故障kill -19后添加对Degraded状态的断言，
              在tearDown中添加kill -18解挂主节点步骤，规避故障未清除导致数据库状态异常的情况
    modified: 2022/10/11 modified by wx1115623,修改sql语句，确保unique_sql查询结果达到100
"""
import os
import time
import unittest
from testcase.utils.ComThread import ComThread
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.CommonSH import RestartDbCluster
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node, macro


class GucTestCase(unittest.TestCase):
    """guc-query-unique_sql auto eviction.

    Scenario: freeze the primary with SIGSTOP (kill -19), concurrently run
    reset_unique_sql() and a unique_sql query from two threads, then resume
    the primary with SIGCONT (kill -18) and verify the record count left in
    the auto-cleaned unique_sql hash table.
    """

    # Logical node names: primary first, then the two standbys.
    nodes = ("PrimaryDbUser", "Standby1DbUser", "Standby2DbUser")

    @RestartDbCluster(*nodes)
    def setUp(self):
        """Create helpers, locate the instance disk, back up postgresql.conf."""
        self.log = Logger()
        self.primary_sh = CommonSH(self.nodes[0])
        self.primary_root_node = Node(node="PrimaryRoot")
        self.user_node = Node(node="PrimaryDbUser")
        self.constant = Constant()
        self.com = Common()
        self.log.info("setUp-实例化:ok")
        self.log.info(f"-----{os.path.basename(__file__)} start-----")
        text = "获取磁盘名称;expect:成功"
        self.log.info(text)
        self.log.info("获取磁盘名称")
        cmd = f"df -h {macro.DB_INSTANCE_PATH}"
        self.log.info(cmd)
        msg = self.primary_root_node.sh(cmd).result()
        self.log.info(msg)
        # The last line of `df -h` output is the filesystem holding the
        # database instance path; its first column is the device name.
        self.disk_name = msg.splitlines()[-1].split()[0].strip()
        # NOTE(review): the two fault-injection command strings below are
        # never referenced anywhere in this test — presumably leftover from
        # a disk-full fault variant; kept unchanged to avoid altering state.
        self.inject_cmd = f"rfile_full (diskname) values ({self.disk_name})"
        self.clean_command = f"rfile_full  where(diskname={self.disk_name})"

        text = "-----备份postgresql.conf文件-----"
        self.log.info(text)
        # Back up postgresql.conf so tearDown can restore the original GUCs.
        self.file = os.path.join(macro.DB_INSTANCE_PATH,
                                 macro.DB_PG_CONFIG_NAME)
        result = self.user_node.sh(f"cp {self.file} "
                                   f"{self.file}backup").result()
        self.log.info(result)

    def test_main(self):
        """Run the six-step scenario described in the module docstring."""
        # step1: set the GUC parameters and restart the cluster to apply them.
        text = "--------step1:设置参数并重启生效;expect:成功-----"
        self.log.info(text)
        param_list = ["enable_auto_clean_unique_sql=on",
                      "enable_resource_track=on", "use_workload_manager=on",
                      "instr_unique_sql_count=100"]
        for param in param_list:
            # `guc_res` instead of `re` to avoid shadowing the stdlib module.
            guc_res = self.primary_sh.executeGsguc(
                "set", self.constant.GSGUC_SUCCESS_MSG, param)
            self.log.info(guc_res)
            self.assertTrue(guc_res, "执行失败:" + text)

        result = self.primary_sh.stopDbCluster()
        self.assertTrue(result)
        result = self.primary_sh.startDbCluster(get_detail=True)
        self.log.info(result)

        # step2: reset the hash table, then generate 100 unique SQL entries.
        text = "-----step2:清空记录，执行100 unique_sql，" \
               "查看hash table记录条数; expect:100-----"
        self.log.info(text)
        result = self.primary_sh.executDbSql(
            "select reset_unique_sql('global','all',100);"
            "select count(*) from dbe_perf.statement;")
        self.log.info(result)
        self.assertIn("t\n", result, "执行失败:" + text)
        self.assertIn("1\n", result, "执行失败:" + text)
        # 48 iterations x distinct table names produce enough distinct
        # statements to fill the table to instr_unique_sql_count (100).
        for j in range(48):
            tablename = f"unique_table_{j}"
            result = self.primary_sh.executDbSql(
                f"drop table if exists {tablename};"
                f"create table {tablename}(a int, b int);"
                f"insert into {tablename} values(1,2);"
                f"drop table if exists {tablename};")
            self.log.info(result)
            self.assertNotIn("ERROR", result, "执行失败:" + text)
        result = self.primary_sh.executDbSql("select count(*) "
                                             "from dbe_perf.statement;")
        self.log.info(result)
        self.assertIn("100\n", result, "执行失败:" + text)

        # step3: SIGSTOP (signal 19) the primary gaussdb process.
        text = "-----step3:kill -19挂住主节点; expect:成功-----"
        self.log.info(text)
        result = self.com.kill_pid_keyword("gaussdb", 19, self.user_node)
        self.log.info(result)
        time.sleep(1)

        # While the primary is stopped the cluster may report Normal or
        # Degraded depending on how quickly the state is detected.
        db_status = self.primary_sh.getDbClusterStatus("detail")
        self.log.info(db_status)
        self.assertTrue("cluster_state   : Normal" in db_status or
                        "cluster_state   : Degraded" in db_status,
                        "执行失败" + text)

        # step4: start two daemon threads that issue reset and a unique_sql
        # query concurrently against the frozen primary.
        text = "-----step4:启线程同时执行reset和101条unique_sql;expect:成功-----"
        self.log.info(text)
        self.log.info("101条unique_sql")
        session1 = ComThread(self.primary_sh.executDbSql,
                             args=("select count(*) from dbe_perf.statement;",))
        # setDaemon() is deprecated since Python 3.10; assign the attribute.
        session1.daemon = True

        self.log.info("reset")
        session2 = ComThread(self.primary_sh.executDbSql,
                             args=("select "
                                   "reset_unique_sql('global','all',100);",))
        session2.daemon = True

        session1.start()
        session2.start()

        # step5: SIGCONT (signal 18) the primary, then check the table size.
        text = "-----step5:kill -18解挂主节点线程，观察hash表记录条数;expect:成功-----"
        self.log.info(text)
        result = self.com.kill_pid_keyword("gaussdb", 18, self.user_node)
        self.log.info(result)
        time.sleep(1)

        session1.join(60)
        result = session1.get_result()
        self.log.info("session1 result:" + result)

        session2.join(60)
        result = session2.get_result()
        self.log.info("session2 result:" + result)

        # After the concurrent reset, only the handful of statements issued
        # since the reset should remain (exact count depends on timing).
        result = self.primary_sh.executDbSql("select count(*) "
                                             "from dbe_perf.statement;")
        self.log.info(result)
        self.assertTrue("1\n" in result or "2\n" in result or "4\n" in result
                        or "5\n" in result, "执行失败:" + text)

        # Sanity: run TPCC to confirm the cluster still serves a workload.
        text = "-----运行TPCC;expect:成功-----"
        self.log.info(text)
        result = self.com.startTPCC(self.primary_root_node, macro.TPCC_PATH)
        self.log.info(result)
        self.assertIn(self.constant.TPCC_SUCCESS_MSG, result, "执行失败" + text)

        self.log.info("检查主备是否同步")
        res = self.primary_sh.check_data_consistency()
        self.assertTrue(res, "执行失败" + text)

        # Compare table lists and per-table row counts across all nodes.
        text = "-----检查数据一致性;expect:成功-----"
        self.log.info(text)
        nodes = (Node(self.nodes[0]), Node(self.nodes[1]),
                 Node(self.nodes[2]))
        flag = self.com.check_data_sample_by_all(r'\d', *nodes)
        self.assertTrue(flag, "执行失败" + text)
        table_dict = self.com.format_sql_result(
            self.primary_sh.executDbSql(r'\d'))
        table_name = table_dict.get('Name')
        for name in table_name:
            select_sql = f"select count(*) from {name};"
            flag = self.com.check_data_sample_by_all(select_sql, *nodes)
            self.assertTrue(flag, "执行失败" + text)

    def tearDown(self):
        """Resume the primary, restore postgresql.conf and restart the cluster."""
        text = "-----step6:环境清理;expect:成功-----"
        self.log.info(text)
        # Always send SIGCONT in case test_main failed before step5, which
        # would otherwise leave the primary frozen for later tests.
        self.log.info("-----kill -18解挂主节点线程，防止故障未清除-----")
        result = self.com.kill_pid_keyword("gaussdb", 18, self.user_node)
        self.log.info(result)
        self.log.info("-----恢复postgresql.conf文件-----")
        mv_result = self.user_node.sh(f"mv {self.file}backup "
                                      f"{self.file}").result()
        self.log.info(mv_result)

        result = self.primary_sh.stopDbCluster()
        self.log.info(result)
        result = self.primary_sh.startDbCluster(get_detail=True)
        self.log.info(result)
        db_status = self.primary_sh.getDbClusterStatus("detail")
        self.log.info(db_status)
        # `mv` prints nothing on success; any output means the restore failed.
        self.assertEqual("", mv_result, "执行失败:" + text)
        self.assertIn("cluster_state   : Normal", db_status, "执行失败" + text)
        self.log.info(f"-----{os.path.basename(__file__)} end-----")
