"""
Case Type   : 多块预读
Case Name   : 非线性扫描不会预读
Create At   : 2024/12/05
Owner       : @li-xin12345
Description :
    1、建表，重启
    2、查询表, 使用索引, 查看日志是否打印预读信息
    3、删除主键约束，查询，查看日志是否打印预读信息
    4、清理环境
Expect      :
    1、成功
    2、不预读
    3、预读
    4、成功
History     :
"""
import os
import unittest

from yat.test import Node
from yat.test import macro

from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger


class SessionTest(unittest.TestCase):
    # Log line emitted by the database when multi-block pre-read (read-ahead)
    # actually fires; steps 2 and 3 grep the newest server log for it.
    PRE_READ_MSG = 'End of pre-Read, the max blocks batch is'

    def setUp(self):
        """Prepare logger, DB helpers and per-case names/paths."""
        self.log = Logger()
        self.log.info(f'-----{os.path.basename(__file__)} start-----')
        self.constant = Constant()
        self.common = Common()
        self.pri_sh = CommonSH('PrimaryDbUser')
        self.pri_user_node = Node('PrimaryDbUser')
        self.tb_name = 't_multi_bulk_read_0019'
        # Directory holding the primary DN's server logs; DN_NODE_NAME may be
        # a '/'-joined list, the first element is the primary node name.
        self.pg_log = \
            os.path.join(macro.PG_LOG_PATH, macro.DN_NODE_NAME.split('/')[0])
        self.shared_buffer_default = self.common.show_param('shared_buffers')

    def _grep_pre_read_log(self):
        """Grep the tail of the newest DB log for the pre-read message.

        Returns the shell output: matching lines if pre-read was logged,
        otherwise an empty result.
        """
        cmd = f"cd {self.pg_log};" \
              f"ls -t | head -1 | awk '{{{{print $1}}}}' | " \
              f"xargs cat | tail -30 | " \
              f"grep '{self.PRE_READ_MSG}'"
        self.log.info(cmd)
        res = self.common.get_sh_result(self.pri_user_node, cmd)
        self.log.info(res)
        return res

    def test_multi_bulk_read(self):
        # Step 1: create/populate the table, then restart so the buffer cache
        # is cold and subsequent reads must hit disk (pre-read eligible).
        text = '----step1: 建表，重启；expect: 成功----'
        self.log.info(text)
        sql = f'''drop table if exists {self.tb_name} cascade;
        create table {self.tb_name} (
            o_orderkey bigint primary key,
            o_custkey integer not null,
            o_orderstatus char(1) not null,
            o_totalprice decimal(15,2) not null,
            o_orderdate date not null,
            o_orderpriority char(15) not null,
            o_clerk char(15) not null,
            o_shippriority integer not null,
            o_comment varchar(79) not null
        );
        insert into {self.tb_name}
        values (generate_series(1, 100000, 2), 454411, 'O', 105846.51, 
        '1996-03-31', '3-MEDIUM', 'Clerk#000070424', 0, 
        's warhorses after the even, even deposits haggle  ironic de');
        insert into {self.tb_name}
        values (generate_series(2, 100000, 2), 14864260, 'F', 109713.24, 
        '1993-10-19', '2-HIGH', 'Clerk#000050083', 0, 'requests. pending, fi');
        '''
        res = self.pri_sh.execut_db_sql(sql)
        self.log.info(res)
        self.assertIn(self.constant.DROP_TABLE_SUCCESS, res, f'执行失败：{text}')
        self.assertIn(self.constant.CREATE_TABLE_SUCCESS, res,
                      f'执行失败：{text}')
        self.assertEqual(res.count('INSERT 0 50000'), 2, f'执行失败：{text}')

        msg = self.pri_sh.restart_db_cluster()
        self.log.info(msg)
        status = self.pri_sh.get_db_cluster_status()
        self.assertTrue("Degraded" in status or "Normal" in status)

        # Step 2: with the primary key in place the range predicate uses an
        # index (non-linear) scan, so pre-read must NOT be triggered.
        text = '----step2: 查询表, 使用索引, 查看日志是否打印预读信息；' \
               'expect: 不预读----'
        self.log.info(text)
        sql = f"set heap_bulk_read_size = 20;" \
              f"explain select count(*) from {self.tb_name} " \
              f"where o_orderkey > 0 and o_orderkey <= 100000;" \
              f"select count(*) from {self.tb_name} " \
              f"where o_orderkey > 0 and o_orderkey <= 100000;"
        res = self.pri_sh.execut_db_sql(sql)
        self.log.info(res)
        self.assertIn(self.constant.SET_SUCCESS_MSG, res, f'执行失败：{text}')
        self.assertIn('5 rows', res, f'执行失败：{text}')
        self.assertIn('1 row', res, f'执行失败：{text}')

        self.log.info('--查看日志是否打印预读信息--')
        res = self._grep_pre_read_log()
        # Original code used assertIn('', res), which can never fail; the
        # expectation is that the pre-read message is ABSENT from the log.
        self.assertNotIn(self.PRE_READ_MSG, res, f'执行失败：{text}')

        # Step 3: dropping the primary key forces a sequential (linear) scan
        # after restart, which should trigger pre-read and log the message.
        text = '----step3: 删除主键约束，查询，查看日志是否打印预读信息；' \
               'expect: 预读----'
        self.log.info(text)
        sql = f"alter table {self.tb_name} drop constraint {self.tb_name}_pkey;"
        res = self.pri_sh.execut_db_sql(sql)
        self.log.info(res)
        self.assertIn(self.constant.ALTER_TABLE_MSG, res, f'执行失败：{text}')

        msg = self.pri_sh.restart_db_cluster()
        self.log.info(msg)
        status = self.pri_sh.get_db_cluster_status()
        self.assertTrue("Degraded" in status or "Normal" in status)

        sql = f"set heap_bulk_read_size = 20;" \
              f"explain select count(*) from {self.tb_name} " \
              f"where o_orderkey > 0 and o_orderkey <= 100000;" \
              f"select count(*) from {self.tb_name} " \
              f"where o_orderkey > 0 and o_orderkey <= 100000;"
        res = self.pri_sh.execut_db_sql(sql)
        self.log.info(res)
        self.assertIn(self.constant.SET_SUCCESS_MSG, res, f'执行失败：{text}')
        self.assertIn('3 rows', res, f'执行失败：{text}')
        self.assertIn('1 row', res, f'执行失败：{text}')

        self.log.info('--查看日志是否打印预读信息--')
        res = self._grep_pre_read_log()
        # Original code used assertIsNotNone(res), which a shell-result string
        # virtually always satisfies; the expectation is that the pre-read
        # message is PRESENT in the log.
        self.assertIn(self.PRE_READ_MSG, res, f'执行失败：{text}')

    def tearDown(self):
        """Step 4: drop the test table and close out the case log."""
        text = '----step4: 清理环境;expect:成功----'
        self.log.info(text)
        sql = f"drop table if exists {self.tb_name} cascade;"
        res = self.pri_sh.execut_db_sql(sql)
        self.log.info(res)
        self.assertIn(self.constant.DROP_TABLE_SUCCESS, res, f'执行失败：{text}')
        self.log.info(f'-----{os.path.basename(__file__)} end-----')
