"""
Case Type   : reindex
Case Name   : 使用reindex重建表索引的同时新建索引
Create At   : 2022/08/26
Owner       : liu-tong-8848
Description :
    1.建表
    2.向表中插入数据
    3.对表创建索引
    4.会话一对表在线重建索引
    5.会话二同时新建索引
    6.清理环境
Expect      :
    1.建表成功
    2.向表中插入数据成功
    3.对表创建索引成功
    4.成功
    5.成功
    6.清理环境成功
History     :
"""

import os
import time
import unittest
from yat.test import Node
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.ComThread import ComThread
from testcase.utils.Logger import Logger


class SQL(unittest.TestCase):
    """Concurrency test for online index rebuild (case reindex_concurrently_0055).

    Scenario: while session 1 runs ``REINDEX TABLE CONCURRENTLY`` on a
    populated table, session 2 simultaneously creates a new index on the
    same table. Expected outcome: no deadlock — the CREATE INDEX waits for
    the reindex to finish and then succeeds.
    """

    def setUp(self):
        # Per-test fixtures: logger, DB shell wrapper, and the object names
        # used throughout this case.
        self.logger = Logger()
        self.logger.info(f'-----{os.path.basename(__file__)} start-----')
        self.dbuserNode = Node('PrimaryDbUser')
        self.primary_sh = CommonSH('PrimaryDbUser')
        self.table = 't_reindex_concurrently_0055'
        self.index1 = 'i_reindex_concurrently_0055_1'
        self.index2 = 'i_reindex_concurrently_0055_2'
        self.Constant = Constant()

    def test_reindex(self):
        # step1: create the test table (drop any leftover from a prior run).
        step1 = 'step1:建表 expect:建表成功'
        self.logger.info(step1)
        create_table = self.primary_sh.execut_db_sql(f'''
            drop table if exists {self.table};
            create table {self.table}(id int,body text);''')
        self.logger.info(create_table)
        self.assertIn(self.Constant.TABLE_DROP_SUCCESS, create_table,
                      "执行失败" + step1)
        # NOTE(fix): message previously said "建分区表失败" but this table is
        # not partitioned.
        self.assertIn(self.Constant.CREATE_TABLE_SUCCESS, create_table,
                      "建表失败" + step1)

        # step2: load 1,100,000 rows so the reindex takes long enough for
        # session 2 to overlap with it.
        # BUGFIX: original used generate_series(1,1,1100000) — i.e.
        # (start=1, stop=1, step=1100000) — which inserts a single row.
        step2 = 'step2:向表中插入数据 expect:向表中插入数据成功'
        self.logger.info(step2)
        insert_data = self.primary_sh.execut_db_sql(
            f'''insert into {self.table} values(generate_series(1,1100000),
            '2010-1-1');''')
        self.logger.info(insert_data)
        self.assertIn(self.Constant.INSERT_SUCCESS_MSG, insert_data,
                      "执行失败" + step2)

        # step3: create the initial index that the reindex will rebuild.
        step3 = 'step3:对表创建索引 expect:对表创建索引成功'
        self.logger.info(step3)
        create_index = self.primary_sh.execut_db_sql(
            f'''drop index if exists {self.index1};
            create index {self.index1} on {self.table}(id);''')
        self.logger.info(create_index)
        self.assertIn(self.Constant.DROP_INDEX_SUCCESS_MSG, create_index,
                      "执行失败" + step3)
        self.assertIn(self.Constant.CREATE_INDEX_SUCCESS_MSG, create_index,
                      "执行失败" + step3)

        # step4: session 1 rebuilds the table's indexes online; the trailing
        # pg_sleep keeps the session occupied so the overlap is observable.
        step4 = 'step4:会话一对表在线重建索引 expect:成功'
        self.logger.info(step4)
        reindex = f'''reindex table concurrently {self.table};
            select pg_sleep(60);'''
        session1 = ComThread(self.primary_sh.execut_db_sql, args=(reindex,))
        # daemon=True so a hung DB session cannot block interpreter exit.
        # (Thread.setDaemon() is deprecated since Python 3.10.)
        session1.daemon = True

        # step5: session 2 concurrently creates a second index on the same
        # table; it must not deadlock — it should wait for the reindex,
        # then succeed.
        step5 = 'step5:会话二同时对表新建索引,看是否发生死锁 expect:不发生死锁，等待reindex结束后新建索引'
        self.logger.info(step5)
        de_index = f'''drop index if exists {self.index2};
            create index {self.index2} on {self.table}(id);'''
        session2 = ComThread(self.primary_sh.execut_db_sql, args=(de_index,))
        session2.daemon = True
        session1.start()
        # Small delay so the reindex acquires its locks before session 2 starts.
        time.sleep(2)
        session2.start()
        # join timeouts exceed the pg_sleep(60) so a healthy run always finishes.
        session1.join(65)
        session2.join(20)
        result1 = session1.get_result()
        self.logger.info(result1)
        result2 = session2.get_result()
        self.logger.info(result2)
        self.assertIn('REINDEX', result1, "执行失败" + step5)
        self.assertIn(self.Constant.DROP_INDEX_SUCCESS_MSG, result2, "执行失败" + step5)
        self.assertIn(self.Constant.CREATE_INDEX_SUCCESS_MSG, result2, "执行失败" + step5)

    def tearDown(self):
        # step6: drop the table (cascades to its indexes) and verify cleanup.
        step6 = 'step6:清理环境 expect:清理环境成功'
        self.logger.info(step6)
        clean_environment = self.primary_sh.execut_db_sql(
            f'''drop table {self.table} cascade;''')
        self.logger.info(clean_environment)
        self.assertIn(self.Constant.TABLE_DROP_SUCCESS, clean_environment,
                      "执行失败" + step6)
        self.logger.info(f'-----{os.path.basename(__file__)} end-----')
