"""
Case Type   : 故障&可靠性
Case Name   : 创建在线索引的同时删除表并执行vacuum及analyze
Create At   : 2020/12/28
Owner       : opengauss
Description :
    1.创建表并插入数据
    2.创建在线索引
    3.创建在线索引的同时执行vacuum;analyze;
    4.创建在线索引的同时删除数据
    5.使用索引时查询数据
    6.不使用索引时查询数据
    7.删除索引
    8.插入数据
    9.重复执行2-8 30次
Expect      :
    1.创建表插入数据成功
    2.创建索引成功
    3.执行vacuum，analyze成功
    4.删除数据成功
    5.无
    6.使用索引和不使用索引查询结果相同
    7.删除索引成功
    8.插入数据成功
    9.无
History     :
     Modified by @wan005 2021/1/11: 修改执行顺序，利于vacuum异常后定位
     Modified by @wan005 2021/1/25: vacuum指定表，避免vacuum全数据时间过长
     Modified by @wan005 2021/1/25: vacuum delete为同级锁会发生死锁，修改断言
     Modified by @wan005 2021/3/19: 修改断言，当发生死锁是索引可能创建不成功
     Modified by wx1115623 2022/12/5: 修改断言，适配禁用indexscan时候不禁用indexonlyscan
     Modified by wx1115623 2022/12/13: 修改循环步骤，规避step8插入数据导致下次循环时选择不同的扫描算子
     Modified by ningyali 2024/7/9 优化用例解决一致性问题
"""
import os
import time
import unittest

from testcase.utils.ComThread import ComThread
from testcase.utils.CommonSH import CommonSH, RestartDbCluster
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger


class Concurrently(unittest.TestCase):
    """Reliability case: CREATE INDEX CONCURRENTLY under concurrent load.

    While an online index build is running on a 3M-row table, the case
    concurrently executes ``vacuum``/``analyze`` and a bulk ``delete``,
    then verifies that an index scan and a sequential scan return the
    same result. Deadlocks between the concurrent statements are an
    accepted outcome (see History notes in the module docstring), so
    the assertions tolerate 'deadlock detected' alongside success.
    The whole scenario is repeated 30 times.
    """

    # Cluster node users; index 0 (the primary) executes all SQL.
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Connect to the primary node and prepare table/index names."""
        self.pri_sh = CommonSH(self.nodes_tuple[0])
        # NOTE(review): attribute name looks like a typo for "pri_user";
        # kept as-is in case it is read elsewhere in the project.
        self.pir_user = self.pri_sh.node
        self.log = Logger()
        self.Constant = Constant()
        self.tblname = 't_dbsystem_case225'
        self.idxname = 'i_dbsystem_case225'
        self.log.info(f"-----{os.path.basename(__file__)} start-----")

    def test_in_select(self):
        """Run the online-index vs. vacuum/delete concurrency scenario 30x."""
        for i in range(30):
            self.log.info(f'-----round {i}-----')
            # step1: recreate the table each round so every iteration
            # starts from an identical state (see 2024/7/9 history note).
            self.log.info('-----step1:创建表并插入数据;expect:创建表插入数据成功-----')
            sql = f'drop table if exists {self.tblname};' \
                  f'create table {self.tblname}(' \
                  f'id int, first_name text, last_name text);'
            result = self.pri_sh.executDbSql(sql)
            self.assertIn(self.Constant.CREATE_TABLE_SUCCESS, result)

            # Load 3M rows so the index build is slow enough to overlap
            # with the vacuum/analyze and delete issued below.
            self.log.info('-----insert data-----')
            sql_cmd = f'insert into {self.tblname} ' \
                      f'select id, md5(random()::text), md5(random()::text) ' \
                      f'from (' \
                      f'select * from generate_series(1,3000000) as id) ' \
                      f'as x;' \
                      f'update {self.tblname} set ' \
                      f'first_name=\'测试查询不阻塞\', ' \
                      f'last_name=\'测试%%不阻塞\' ' \
                      f'where id = 712;' \
                      f'analyze {self.tblname};'
            result = self.pri_sh.executDbSql(sql_cmd)
            self.assertIn(self.Constant.insert_success_msg, result)
            self.assertIn(self.Constant.update_success_msg, result)

            # step2: build the index online in a background thread; the
            # current_time selects bracket the build for timing diagnosis.
            self.log.info('-----step2:创建索引成功;expect:创建索引成功-----')
            sql = f'select current_time;' \
                  f'create index concurrently {self.idxname} ' \
                  f'on {self.tblname} using btree(id);' \
                  f'select current_time;'
            create_index_thread = ComThread(self.pri_sh.executDbSql,
                                            args=(sql,))
            # Thread.setDaemon() is deprecated since Python 3.10;
            # set the daemon attribute before start() instead.
            create_index_thread.daemon = True
            create_index_thread.start()

            # step3: vacuum/analyze the same table while the index build
            # is in flight. Vacuum is restricted to this table to keep
            # runtime bounded (2021/1/25 history note), and a deadlock
            # with the concurrent build/delete is acceptable.
            self.log.info('-----step3:创建在线索引的同时执行vacuum;analyze;'
                          'expect:执行vacuum，analyze成功-----')
            time.sleep(0.1)
            sql = f'vacuum {self.tblname};' \
                  f'analyze {self.tblname};'
            result_vacuum = self.pri_sh.executDbSql(sql)
            self.log.info(result_vacuum)
            flag = (self.Constant.vacuum_success_msg in result_vacuum) or \
                   ('deadlock detected' in result_vacuum)
            self.assertTrue(flag)

            # step4: bulk-delete rows while the index build is (possibly
            # still) running, again in a background thread.
            self.log.info('-----step4:创建在线索引的同时删除数据;expect:删除数据成功-----')
            sql = f'select current_time;' \
                  f'delete  from {self.tblname} ' \
                  f'where id>70000 and id <800000;' \
                  f'select current_time; '
            delete_thread = ComThread(self.pri_sh.executDbSql,
                                      args=(sql,))
            delete_thread.daemon = True
            delete_thread.start()

            # Delete may deadlock against vacuum (same-level locks,
            # 2021/1/25 history note), so accept either outcome.
            self.log.info('-----check delete data result-----')
            delete_thread.join(10 * 60)
            delete_result = delete_thread.get_result()
            self.log.info(delete_result)
            flag = (self.Constant.delete_success_msg in delete_result) or \
                   ('deadlock detected' in delete_result)
            self.assertTrue(flag)

            # The index build itself may also have been the deadlock
            # victim (2021/3/19 history note).
            self.log.info('-----check create index result-----')
            create_index_thread.join(10 * 60)
            create_idx_result = create_index_thread.get_result()
            self.log.info(create_idx_result)
            flag = (self.Constant.CREATE_INDEX_SUCCESS_MSG in
                    create_idx_result) or \
                   ('deadlock detected' in create_idx_result)
            self.assertTrue(flag)

            # step5: with seqscan disabled the plan must use the new
            # index — but only if the build actually succeeded.
            self.log.info('-----step5:使用索引时查询数据;expect:查询成功-----')
            self.log.info('-----explain -----')
            sql = f'analyze {self.tblname};' \
                  f'set enable_seqscan=off;' \
                  f'explain select count(*) from {self.tblname} ' \
                  f'where id >60000 and id <2000000;'
            result = self.pri_sh.executDbSql(sql)
            if 'deadlock detected' not in create_idx_result:
                self.assertIn(self.idxname, result)

            self.log.info('-----select-----')
            sql = f'set enable_seqscan=off;' \
                  f'set enable_indexscan=on;' \
                  f'set enable_bitmapscan=off;' \
                  f'select count(*) from {self.tblname} ' \
                  f'where id >60000 and id <2000000;'
            result_index = self.pri_sh.executDbSql(sql)

            # step6: disabling indexscan/bitmapscan does not disable
            # indexonlyscan (2022/12/5 history note), so the index name
            # is still expected in the plan here.
            self.log.info('-----step6:不使用索引时查询数据;'
                          'expect:使用索引和不使用索引查询结果相同-----')
            self.log.info('-----explain-----')
            sql = f'set enable_indexscan=off;' \
                  f'set enable_bitmapscan=off;' \
                  f'explain select count(*) from {self.tblname} ' \
                  f'where id >60000 and id <2000000;'
            result = self.pri_sh.executDbSql(sql)
            self.assertIn(self.idxname, result)

            self.log.info('-----select-----')
            sql = f'set enable_indexscan=off;' \
                  f'set enable_bitmapscan=off;' \
                  f'select count(*) from {self.tblname} ' \
                  f'where id >60000 and id <2000000;'
            result_without_index = self.pri_sh.executDbSql(sql)
            # Substring containment: the count produced without the
            # index must appear in the indexed query's output, i.e. the
            # two scans returned the same result.
            self.assertIn(result_without_index, result_index)

            self.log.info('-----step7:删除索引;expect:删除索引成功-----')
            result = self.pri_sh.executDbSql(f'drop index {self.idxname};')
            self.assertIn(self.Constant.drop_index_success_msg, result)

            self.log.info('-----step8:插入数据;expect:插入数据成功-----')
            sql = f'insert into {self.tblname} ' \
                  f'select id, md5(random()::text), md5(random()::text) ' \
                  f'from (select * from generate_series(65000,900000) ' \
                  f'as id) as x;'
            result = self.pri_sh.executDbSql(sql)
            self.assertIn(self.Constant.insert_success_msg, result)

    def tearDown(self):
        """Log cluster status and drop the test table."""
        self.log.info('-----this is tearDown-----')
        self.log.info('-----drop table-----')
        # NOTE(review): 'datail' looks like a typo for 'detail', but it
        # is a runtime argument to a project API — confirm before changing.
        result = self.pri_sh.getDbClusterStatus('datail')
        self.log.info(result)
        result = self.pri_sh.executDbSql(
            f'drop table if exists {self.tblname};')
        self.assertIn(self.Constant.DROP_TABLE_SUCCESS, result)
        self.log.info(result)
