"""
Case Type   : DDL-delta-table
Case Name   : 验证列存范围分区表自动迁入
Create At   : 2022/10/11
Owner       : liu-tong-8848
Description :
    1.获取autovacuum_naptime默认值，并修改参数
    2.建列存范围分区表并设置阈值
    3.在autovacuum_naptime时效内，多次不超过但总和超过阈值的插入值
    4.观察autovacuum_naptime时效到后delta表内数据是否迁入主表
    5.环境清理
Expect      :
    1.成功，设置成功
    2.成功
    3.数据delta表，时效内不迁入主表
    4.时效后迁入主表
    5.成功
History     :
"""

import os
import unittest

from yat.test import Node
from yat.test import macro

from testcase.utils.Common import Common
from testcase.utils.Common import CommonSH
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant

# Shared shell helper bound to the primary DB-user node; module-level so that
# setUp, the test body, and tearDown all reuse a single instance.
COMMONSH = CommonSH("PrimaryDbUser")


class Tools(unittest.TestCase):
    """Verify auto-migration of a column-store range-partitioned table's
    delta store.

    Rows inserted in batches that individually stay below
    ``deltarow_threshold`` land in the per-partition delta tables; after
    ``autovacuum_naptime`` elapses they must be merged into the main table.
    """

    def setUp(self):
        """Create helpers/nodes, back up postgresql.conf and require a
        healthy (Normal/Degraded) cluster before running."""
        self.log = Logger()
        self.common = Common()
        self.log.info(f'----{os.path.basename(__file__)} start----')
        self.primary_root_node = Node('PrimaryRoot')
        self.primary_node = Node('PrimaryDbUser')
        self.constant = Constant()
        self.tb_name = "t_test_delta_0007"

        self.log.info('------备份postgres.conf文件----------')
        # Keep a pristine copy of the config so tearDown can restore the
        # original GUC settings after the test mutates them.
        self.conf_path = os.path.join(
            macro.DB_INSTANCE_PATH, macro.DB_PG_CONFIG_NAME)
        shell_cmd = f"cp {self.conf_path} {self.conf_path}_testbak"
        self.log.info(shell_cmd)
        result = self.common.get_sh_result(self.primary_node, shell_cmd)
        self.log.info(result)
        # cp emits nothing on success; any output indicates the backup failed.
        self.assertEqual("", result, "执行失败: 备份postgres.conf文件")

        status = COMMONSH.get_db_cluster_status("detail")
        self.log.info(status)
        self.assertTrue("Normal" in status or "Degraded" in status)

    def test_delta(self):
        """Steps 1-4: set autovacuum GUCs, create the partitioned table,
        insert below-threshold batches, then check delta-to-main migration
        after the naptime window."""
        text = '----step1:修改参数;expect:成功----'
        self.log.info(text)
        # Required GUCs for delta-store auto-migration; each is reloaded
        # only if its current value differs from the target.
        param_dict = {"track_counts": "on", "autovacuum": "on",
                      "autovacuum_max_workers": "3", "autovacuum_mode": "mix",
                      "autovacuum_naptime": "1min", "enable_delta_store": "on"}
        for param, expected in param_dict.items():
            content = self.common.show_param(param)
            self.log.info(content)
            if content != expected:
                result = COMMONSH.execute_gsguc(
                    'reload', self.constant.GSGUC_SUCCESS_MSG,
                    f"{param}='{expected}'")
                self.assertTrue(result, "执行失败" + text)
        # Restart so every changed parameter (including restart-level ones)
        # definitely takes effect, then re-check cluster health.
        status = COMMONSH.restart_db_cluster()
        self.log.info(status)
        status = COMMONSH.get_db_cluster_status("detail")
        self.log.info(status)
        self.assertTrue("Normal" in status or "Degraded" in status)

        text = '----step2:建列存范围分区表并设置阈值;expect:成功----'
        self.log.info(text)
        # Column-store table with 3 range partitions; inserts smaller than
        # deltarow_threshold=100 rows go to the delta store first.
        sql = f"drop table if exists {self.tb_name} cascade;" \
              f"create table {self.tb_name}(c_int int) " \
              f"with (orientation = column, deltarow_threshold=100) " \
              f"partition by range(c_int)(" \
              f"partition p_{self.tb_name}_01 values less than (500)," \
              f"partition p_{self.tb_name}_02 values less than (1000)," \
              f"partition p_{self.tb_name}_03 values less than (1500));"
        result = COMMONSH.execut_db_sql(sql)
        self.log.info(result)
        self.assertIn(self.constant.TABLE_CREATE_SUCCESS,
                      result, "执行失败" + text)
        result = COMMONSH.execut_db_sql(f"select oid from pg_partition "
                                    f"where relname = 'p_{self.tb_name}_01';"
                                    f"select oid from pg_partition "
                                    f"where relname = 'p_{self.tb_name}_02';"
                                    f"select oid from pg_partition "
                                    f"where relname = 'p_{self.tb_name}_03';")
        self.log.info(result)
        self.assertNotIn("ERROR", result, "执行失败" + text)
        self.log.info("获取delta_oid")
        # The oid value sits at a fixed offset from the end of each 5-line
        # gsql result section (header/separator/value/rowcount/blank) —
        # assumes the client's default output layout; verify if it changes.
        delta1_oid = result.splitlines()[-12].strip()
        self.log.info(delta1_oid)
        delta2_oid = result.splitlines()[-7].strip()
        self.log.info(delta2_oid)
        delta3_oid = result.splitlines()[-2].strip()
        self.log.info(delta3_oid)

        text = '---step3:在autovacuum_naptime时效内，多次不超过但总和超过阈值的插入值;expect:成功---'
        self.log.info(text)
        # Four 99-row inserts (each below the 100-row threshold): two into
        # partition 01 and two into partition 03, 396 rows total.
        sql = f"insert into {self.tb_name} " \
              f"select random()*99+100 from generate_series(1,99);" \
              f"insert into {self.tb_name} " \
              f"select random()*198+100 from generate_series(100,198);" \
              f"insert into {self.tb_name} " \
              f"select random()*499+500 from generate_series(500,598);" \
              f"insert into {self.tb_name} " \
              f"select random()*499+500 from generate_series(600,698);" \
              f"select count(*) from {self.tb_name};" \
              f"select count(*) from {self.tb_name} " \
              f"partition (p_{self.tb_name}_01);" \
              f"select count(*) from {self.tb_name} " \
              f"partition (p_{self.tb_name}_02);" \
              f"select count(*) from {self.tb_name} " \
              f"partition (p_{self.tb_name}_03);"
        result = COMMONSH.execut_db_sql(sql)
        self.log.info(result)
        self.assertEqual(4, result.count("INSERT"), "执行失败" + text)
        self.assertIn("396\n", result, "执行失败" + text)
        # Within the naptime window the rows must still be in the delta
        # tables: 198 each for partitions 01 and 03, 0 for partition 02.
        result = COMMONSH.execut_db_sql(f"select count(*) "
                f"from cstore.pg_delta_part_{delta1_oid};"
                f"select count(*) from cstore.pg_delta_part_{delta2_oid};"
                f"select count(*) from cstore.pg_delta_part_{delta3_oid};")
        self.log.info(result)
        self.assertEqual(2, result.count("198\n"), "执行失败" + text)
        self.assertIn("0\n", result, "执行失败" + text)

        text = '---step4:观察autovacuum_naptime时效后delta数据是否迁入主表;expect:迁入主表---'
        self.log.info(text)
        # Sleep past autovacuum_naptime (1min), then expect the main table
        # to hold all 396 rows and every delta table to be empty.
        result = COMMONSH.execut_db_sql(f"select pg_sleep(60);"
            f"select count(*) from {self.tb_name};"
            f"select count(*) from {self.tb_name} "
            f"partition (p_{self.tb_name}_01);"
            f"select count(*) from {self.tb_name} "
            f"partition (p_{self.tb_name}_02);"
            f"select count(*) from {self.tb_name} "
            f"partition (p_{self.tb_name}_03);"
            f"select count(*) from cstore.pg_delta_part_{delta1_oid};"
            f"select count(*) from cstore.pg_delta_part_{delta2_oid};"
            f"select count(*) from cstore.pg_delta_part_{delta3_oid};")
        self.log.info(result)
        self.assertIn("396\n", result, "执行失败" + text)
        self.assertEqual(2, result.count("198\n"), "执行失败" + text)
        # Four zero counts: empty partition 02 plus the three drained
        # delta tables.
        self.assertEqual(4, result.count("0\n"), "执行失败" + text)

    def tearDown(self):
        """Step 5: drop the test table, restore postgresql.conf from the
        backup, restart the cluster and verify it is healthy again."""
        text = "----step5:恢复环境 expect:成功----"
        self.log.info(text)
        sql = f"drop table if exists {self.tb_name} cascade;"
        drop_table_result = COMMONSH.execut_db_sql(sql)
        self.log.info(drop_table_result)
        self.log.info('--------------还原配置文件-------------')
        shell_cmd = f"rm -rf {self.conf_path};" \
                    f"cp {self.conf_path}_testbak {self.conf_path};" \
                    f"rm -rf {self.conf_path}_testbak"
        self.log.info(shell_cmd)
        result = self.primary_node.sh(shell_cmd).result()
        self.log.info(result)
        result = COMMONSH.restart_db_cluster()
        self.log.info(result)
        status = COMMONSH.get_db_cluster_status("detail")
        self.log.info(status)
        # Assertions are deferred until after the config restore/restart so
        # cleanup always completes even when the drop failed.
        self.assertIn(self.constant.TABLE_DROP_SUCCESS, drop_table_result, "执行失败" + text)
        self.assertTrue("Normal" in status or "Degraded" in status)
        self.log.info(f'----{os.path.basename(__file__)} finish----')