"""
Case Type   : GUC参数--资源消耗
Case Name   : 通过explain(analyze,buffers)查看多少数据来源磁盘，
              多少来源shared_buffers
Create At   : 2021/04/08
Owner       : opentestcase005
Description :
        1、查询shared_buffers默认值；
        2、创建表并插入数据
        3、创建索引
        4、执行explain (analyze,buffers)语句
        5、再次执行上述语句
        6、清理环境
Expect      :
        1、显示默认值为512M，资料描述默认值是8MB，om工具修改1GB，自动化环境为512M
        2、创建表并插入数据成功
        3、创建索引成功
        4、执行计划中的结果以shared read显示，表示取自磁盘且不被缓存
        5、再次执行查询计划后，结果以shared hit显示，表示读取shared_buffers
        6、清理环境完成
History     :
    modified by opentestcase013 at 2022/5/23:优化用例，确保第一次结果以shared read显示，
    另修查询shared_buffers默认值的断言为512MB，改适配自动化环境
    (自动化环境数据库shared_buffers为512M，非1GB，门禁仍为1GB，所以取消步骤1断言)
    modified by li-xin12345 at 2024/12/16:优化用例，确保第一次查询从磁盘读取，后续查询从缓存读取
"""
import unittest
import os
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger


class GUC(unittest.TestCase):
    """Resource-consumption GUC case.

    Uses explain (analyze, buffers) to show that the first scan after a
    cluster restart reads pages from disk ("shared read") while a repeated
    scan is served from shared_buffers ("shared hit").
    """

    def setUp(self):
        # Per-case fixtures: logger, expected-message constants, and a
        # shell helper that executes SQL as the database user.
        self.log = Logger()
        self.log.info(f'-----{os.path.basename(__file__)} start-----')
        self.Constant = Constant()
        self.commonsh = CommonSH('dbuser')
        self.tb_name = 't_resource_0097'
        self.index_name = 'i_resource_0097'

    def test_startdb(self):
        self.log.info('--step1:查询该参数默认值;expect:显示默认值为512M--')
        # Value is logged but deliberately not asserted: CI and gate
        # environments differ (512MB vs 1GB) — see the module docstring
        # History note from 2022/5/23.
        res = self.commonsh.execut_db_sql('''show shared_buffers;''')
        self.log.info(res)

        self.log.info('--step2:创建表并插入数据;expect:成功--')
        self.log.info('--step2.1:查询pg_buffercache_pages中'
                      'relfilenode为空的数量--')
        # Count of currently-free shared buffer pages; used below to size
        # the table so it roughly fills shared_buffers.
        sql_cmd = '''
        select count(*) from pg_buffercache_pages() where relfilenode is null;
        '''
        # splitlines()[-2] extracts the value row from the psql-style
        # output (last line is the row-count footer).
        shared_buffers_upper_limit = self.commonsh.execut_db_sql(
            sql_cmd).splitlines()[-2].strip()
        self.log.info(shared_buffers_upper_limit)
        self.log.info('--step2.2:建表，表中插入数据量约为relfilenode为空的数量;'
                      'expect:成功')
        # Anonymous block inserts one row per free buffer page.
        sql_cmd = f'''
        drop table if exists {self.tb_name};
        create table {self.tb_name}(id int,name varchar(20));
        begin
           for i in 1..{shared_buffers_upper_limit} loop
               insert into {self.tb_name} values(i, i||'a');
           end loop;
        end;
        '''
        res = self.commonsh.execut_db_sql(sql_cmd)
        self.log.info(res)
        self.assertIn(self.Constant.CREATE_ANONYMOUS_BLOCK_SUCCESS_MSG,
                      res)

        self.log.info('--step3:创建索引;expect:成功--')
        res = self.commonsh.execut_db_sql(f'''
        create index {self.index_name} on {self.tb_name} using btree(id);''')
        self.log.info(res)
        self.assertIn(self.Constant.CREATE_INDEX_SUCCESS_MSG,
                      res)

        self.log.info('--重启--')
        # Restart empties shared_buffers, guaranteeing that step4 must
        # fetch the pages from disk.
        msg = self.commonsh.restart_db_cluster()
        self.log.info(msg)
        status = self.commonsh.get_db_cluster_status()
        # assertIn yields a readable failure message, unlike
        # assertTrue("Normal" in status).
        self.assertIn("Normal", status)

        # The same statement is executed twice; keep a single copy so
        # step4 and step5 cannot drift apart.
        explain_sql = f'''explain (analyze,buffers)
                    select * from {self.tb_name} where id =20;'''

        self.log.info('--step4:执行explain (analyze,buffers)语句;'
                      'expect:执行计划中的结果以shared read显示--')
        res = self.commonsh.execut_db_sql(explain_sql)
        self.log.info(res)
        # "shared read" == pages came from disk, not the buffer cache.
        self.assertIn('Buffers: shared read', res)

        self.log.info('--step5:再次执行explain (analyze,buffers)语句;'
                      'expect:执行计划中的结果以shared hit显示--')
        res = self.commonsh.execut_db_sql(explain_sql)
        self.log.info(res)
        # "shared hit" == pages were served from shared_buffers.
        self.assertIn('Buffers: shared hit', res)

    def tearDown(self):
        # Always runs, so the table is removed even if the test body failed.
        self.log.info('--step6:清理环境;expect:成功--')
        drop_table = self.commonsh.execut_db_sql(
            f'drop table if exists {self.tb_name} ;')
        self.log.info(drop_table)
        self.assertEqual(drop_table, self.Constant.TABLE_DROP_SUCCESS, "删表失败")
        self.log.info(f'-----{os.path.basename(__file__)} end-----')