"""
Case Type   : 可靠性&故障
Case Name   : 创建在线索引的同时更新表并执行vacuum及ANALYZE
Create At   : 2020/12/28
Owner       : opengauss
Description :
    1.创建表并插入数据
    2.创建在线索引
    3.执行vacuum;ANALYZE;
    4.创建在线索引的同时更新数据
    5.删除索引
    6.重复执行2-5 30次
Expect      :
    1.创建表插入数据成功
    2.创建索引成功
    3.执行vacuum，analyze成功
    4.更新数据成功
    5.删除索引成功
    6.无
History     :
     Modified by @wan005 2021/1/11: 修改执行顺序，利于vacuum异常后定位
     Modified by @wan005 2021/1/25: vacuum指定表，避免vacuum全数据时间过长
     Modified by @wan005 2021/1/25: vacuum delete为同级锁会发生死锁，修改断言
     Modified by @wan005 2021/3/10: 修改用例为频繁执行vacuum，
     因为vacuum会和create index以及update冲突
     Modified by @wan005 2022/10/10: 用例执行前增加环境检查，执行结束后增加重启和一致性检查
     Modified by ningyali 2024/7/9 优化用例解决一致性问题
"""
import os
import unittest

from testcase.utils.ComThread import ComThread
from testcase.utils.CommonSH import CommonSH
from testcase.utils.CommonSH import RestartDbCluster
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger


class Concurrently(unittest.TestCase):
    """Reliability case 0226: race an online (CONCURRENTLY) index build
    against a bulk UPDATE, then VACUUM/ANALYZE the table.

    Each round:
        1. start ``CREATE INDEX CONCURRENTLY`` in a background thread,
        2. start a large ``UPDATE`` in a second background thread,
        3. assert the update finishes no later than the index build
           (the concurrent build must wait for the update transaction),
        4. run ``VACUUM``/``ANALYZE`` — a detected deadlock is tolerated
           (see the 2021/1/25 history note in the module docstring),
        5. drop the index.
    """

    # Cluster nodes used by this case: primary first, then two standbys.
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Connect to the primary node and prepare table/index names."""
        self.pri_sh = CommonSH(self.nodes_tuple[0])
        self.pri_user = self.pri_sh.node
        self.log = Logger()
        self.Constant = Constant()
        self.t_name = 't_dbsystem_case0226'
        self.i_name = 'i_dbsystem_case0226'
        self.log.info(f'---{os.path.basename(__file__)} start---')

    def _elapsed_seconds(self, sql_output):
        """Return the wall-clock seconds between the two ``current_time``
        values that wrap a SQL statement (psql output lines 3 and -2),
        logging both timestamps.

        NOTE(review): naive same-day arithmetic — the result goes negative
        if the interval crosses midnight; acceptable for this short case.
        """
        def to_secs(stamp):
            # stamp looks like 'HH:MM:SS.ffffff+TZ' -> seconds since 00:00
            parts = stamp.split(':')
            return (int(parts[0]) * 3600 + int(parts[1]) * 60
                    + int(parts[2].split('.')[0]))

        before_create_time = sql_output.splitlines()[2].strip()
        after_create_time = sql_output.splitlines()[-2].strip()
        self.log.info(f'before_create_time is {before_create_time}')
        self.log.info(f'after_create_time is {after_create_time}')
        return to_secs(after_create_time) - to_secs(before_create_time)

    def test_in_select(self):
        """Run the index-build / update / vacuum race for 5 rounds."""
        self.log.info('-------create table-----------')
        result = self.pri_sh.executDbSql(
            f'DROP TABLE IF EXISTS {self.t_name};'
            f'CREATE TABLE {self.t_name}'
            f'(id INT, first_name text, last_name text);')
        self.log.info(result)
        self.assertIn(self.Constant.CREATE_TABLE_SUCCESS, result)

        self.log.info('------------insert data-------------------')
        result = self.pri_sh.executDbSql(
            f"INSERT INTO {self.t_name} SELECT id, md5(random()::text), "
            f"md5(random()::text) FROM (SELECT * FROM "
            f"generate_series(1,3000000) AS id) AS x;")
        self.log.info(result)
        self.assertIn('INSERT', result)

        for i in range(5):
            self.log.info(f'----------------round:{i}----------------')
            self.log.info('-----------create index---------------')
            sql = f'select current_time;create index concurrently ' \
                  f'{self.i_name} on {self.t_name} USING btree(id);' \
                  f'select current_time;'
            create_index_thread = ComThread(
                self.pri_sh.executDbSql, args=(sql,))
            # Thread.setDaemon() is deprecated; assign the attribute.
            create_index_thread.daemon = True
            create_index_thread.start()

            self.log.info('----------update table---------------')
            sql = f"select current_time;" \
                  f"update {self.t_name} set first_name='测试 test 查询不阻塞', " \
                  f"last_name='测试--不阻塞' where id > 712;select current_time;"
            update_thread = ComThread(
                self.pri_sh.executDbSql, args=(sql,))
            update_thread.daemon = True
            update_thread.start()

            self.log.info('-------check update data result----')
            update_thread.join(10 * 60)
            update_result = update_thread.get_result()
            self.log.info(update_result)
            # Fail clearly if the join above timed out instead of
            # crashing on None.splitlines() below.
            self.assertIsNotNone(update_result)
            update_time = self._elapsed_seconds(update_result)
            self.log.info(f'update_time is {update_time}')
            self.assertIn('UPDATE', update_result)

            self.log.info('---------check create index result---------')
            create_index_thread.join(10 * 60)
            create_idx_result = create_index_thread.get_result()
            self.log.info(create_idx_result)
            self.assertIsNotNone(create_idx_result)
            self.assertIn(
                self.Constant.CREATE_INDEX_SUCCESS_MSG, create_idx_result)
            create_index_time = self._elapsed_seconds(create_idx_result)
            self.log.info(f'create_index_time is {create_index_time}')
            # The concurrent index build waits for the open update
            # transaction, so the update must not outlast the build.
            self.assertLessEqual(int(update_time), int(create_index_time))

            self.log.info('-----------vacuum&ANALYZE--------')
            sql = f'vacuum {self.t_name};ANALYZE {self.t_name};'
            result = self.pri_sh.executDbSql(sql)
            self.log.info(result)
            # VACUUM may deadlock with the concurrent workload; both
            # success and a detected deadlock are acceptable outcomes.
            flag = 'VACUUM' in result or 'deadlock detected' in result
            self.assertTrue(flag)

            self.log.info('----------------drop index-----------------')
            result = self.pri_sh.executDbSql(
                f'drop index {self.i_name};')
            self.log.info(result)
            self.assertIn('DROP', result)

    def tearDown(self):
        """Drop the test table, restart the instance, check cluster state."""
        self.log.info('-------------this is tearDown-----------------')
        # BUG FIX: was 'datail' — typo for 'detail' (cf. the second
        # status query below, which spells it correctly).
        result = self.pri_sh.getDbClusterStatus('detail')
        self.log.info(result)
        self.log.info('----------------drop table-------')
        result = self.pri_sh.executDbSql(
            f'drop table if exists {self.t_name};')
        self.log.info(result)
        stop_result = self.pri_sh.stopDbInstance()
        self.log.info(stop_result)
        start_result = self.pri_sh.startDbInstance()
        self.log.info(start_result)
        result = self.pri_sh.getDbClusterStatus("detail")
        self.log.info(result)
        if not start_result:
            # NOTE(review): consistency check only runs when start_result
            # is falsy — this looks inverted; confirm startDbInstance's
            # return convention before changing the condition.
            result = self.pri_sh.check_data_consistency()
            self.assertTrue(result)
        self.log.info(f'-----{os.path.basename(__file__)} finish-----')
