"""
Case Type   : 硬件故障类--CPU
Case Name   : DML+DDL事务并发(部分已提交、部分未提交)备节点cpu占用100%
Create At   : 2021/07/08
Owner       : 1115623
Description :
    1.备节点注入CPU占用率100%故障
    2.开启多个session同时执行DDL+DML
    3.检查主备一致性
Expect      :
    1.成功
    2.sql执行成功
    3.主备一致
History     :
    Modified by 1115623 2023/2/6:修改注入CPU占用率故障断言信息
    Modified by 1115623 2023/2/20:修改CPU占用率故障注入语句，规避故障注入失败
    Modified by peilinqian 2024/7/9:executDbSql方法调用优化
"""

import unittest
import os
import time
from yat.test import Node
from testcase.utils.Common import Common
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
from testcase.utils.CommonSH import RestartDbCluster
from testcase.utils.Logger import Logger
from testcase.utils.ComThread import ComThread


class CPUOverloadAndRunTPCC(unittest.TestCase):
    """Hardware-fault case: inject 100% CPU load on a standby node, then run
    concurrent DDL+DML sessions against the primary and verify that the
    primary and both standbys remain consistent.

    Flow:
        1. Inject a 100%-CPU-usage fault on Standby1 (with one retry).
        2. Run four concurrent sessions on the primary (create/index/insert,
           vacuum, merge, truncate) via daemon worker threads.
        3. Check primary/standby replication sync and row-level consistency.
    """

    # Node-config names passed to the RestartDbCluster decorator so the
    # whole cluster is restarted to a clean state before each run.
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Create node handles, shell helpers, and shared fixtures."""
        self.log = Logger()
        self.log.info(f"-----{os.path.basename(__file__)} start-----")
        # Root access on Standby1 is needed for fault injection; the DB-user
        # handles are used for SQL-level consistency checks.
        self.sta_root_node = Node(node='Standby1Root')
        self.primary_user_node = Node(node='PrimaryDbUser')
        self.sta1_dbuser = Node(node='Standby1DbUser')
        self.sta2_dbuser = Node(node='Standby2DbUser')
        self.com = Common()
        # One shell helper per cluster member for SQL execution / sync checks.
        self.primary_sh = CommonSH('PrimaryDbUser')
        self.sta_1_sh = CommonSH('Standby1DbUser')
        self.sta_2_sh = CommonSH('Standby2DbUser')
        # Base table name; sessions derive cpu_tb, cpu_tb1, cpu_tb2, cpu_tb3.
        self.tb_name = "cpu_tb"
        self.constant = Constant()

    def test_main(self):
        # Step 1: inject the CPU-overload fault on the standby (Chinese log
        # text preserved verbatim — it is runtime output, not a comment).
        self.log.info("--------------1.备节点注入CPU占用率高故障-------------")
        # Count logical CPUs so the fault can span cpuid 0..N-1.
        shell_cmd = "cat /proc/cpuinfo |grep 'processor'|wc -l"
        self.log.info(shell_cmd)
        cpu_num = self.sta_root_node.sh(shell_cmd).result()
        self.log.info(cpu_num)
        # CFE fault-injection record: all CPUs at 100% usage for 20s.
        # int(cpu_num) tolerates the trailing newline from the shell output.
        inject_command = f"rCPU_Overloadau (cpuid1, cpuid2, usage, time) " \
            f"values(0, {int(cpu_num)-1}, 100, 20)"
        result = self.com.cfe_inject(self.sta_root_node, inject_command)
        self.log.info(result)
        # Injection occasionally fails (see 2023/2/20 history note): retry
        # once and only assert on the retry's outcome.
        flag = self.constant.cfe_inject_rcpu_overloadau_success_msg in result
        if not flag:
            res = self.com.cfe_inject(self.sta_root_node, inject_command)
            self.log.info(res)
            self.assertIn(self.constant.cfe_inject_rcpu_overloadau_success_msg,
                          res.strip())

        # Step 2: four concurrent sessions mixing DDL and DML on the primary.
        self.log.info("-------------2.开启多个session同时执行DDL+DML---------------")
        # Session 1: create table + index, bulk insert 90k rows.
        self.log.info("-----session1------------")
        sql = f"create table if not exists {self.tb_name}(i int, j int);" \
            f"create index {self.tb_name}_idx on {self.tb_name}(i);" \
            f"insert into {self.tb_name} " \
            f"values(generate_series(1,90000),generate_series(1,90000));"
        connect_thread1 = ComThread(
            self.primary_sh.executDbSql, args=(sql,))
        # NOTE(review): setDaemon() is deprecated on threading.Thread since
        # Python 3.10; if ComThread inherits Thread, prefer `.daemon = True`
        # — confirm ComThread's base class before changing.
        connect_thread1.setDaemon(True)
        connect_thread1.start()
        # Session 2: cluster-wide vacuum concurrent with the other DML.
        self.log.info("-----session2------------")
        connect_thread2 = ComThread(
            self.primary_sh.executDbSql, args=('vacuum;',))
        connect_thread2.setDaemon(True)
        connect_thread2.start()
        # Session 3: create two tables and merge one into the other; every
        # row matches on i, so the merge updates all 900 rows.
        self.log.info("-----session3------------")
        sql = f"create table if not exists {self.tb_name}3(i int, j int);" \
            f"insert into {self.tb_name}3 " \
            f"values(generate_series(1,900),generate_series(1,900)); " \
            f"create table if not exists {self.tb_name}1(i int, j int);" \
            f"insert into {self.tb_name}1 " \
            f"values(generate_series(1,900),generate_series(5001,5900));" \
            f"merge into {self.tb_name}1 t1 " \
            f"using {self.tb_name}3 t2 on (t1.i=t2.i) " \
            f"when matched then update set t1.j=t2.j " \
            f"when not matched then insert values(0,0);"
        connect_thread3 = ComThread(
            self.primary_sh.executDbSql, args=(sql,))
        connect_thread3.setDaemon(True)
        connect_thread3.start()
        # Session 4: delayed start (pg_sleep) so it overlaps session 3's
        # work on cpu_tb1; creates cpu_tb2, inserts 900k rows into cpu_tb1,
        # then truncates cpu_tb2.
        self.log.info("-----session4------------")
        sql = f"select pg_sleep(3);" \
            f"create table if not exists {self.tb_name}2(i int, j int);" \
            f"insert into {self.tb_name}1 " \
            f"values(generate_series(1,900000),generate_series(1,900000));" \
            f"truncate {self.tb_name}2;"
        connect_thread4 = ComThread(
            self.primary_sh.executDbSql, args=(sql,))
        connect_thread4.setDaemon(True)
        connect_thread4.start()

        # Collect each thread's SQL output (10-minute join timeout apiece)
        # and assert the expected command tags appear.
        self.log.info("----------获取线程结果----------------------")
        self.log.info("-----session1------------")
        connect_thread1.join(600)
        result = connect_thread1.get_result()
        self.log.info(result)
        self.assertIn(self.constant.CREATE_TABLE_SUCCESS, result)
        self.assertIn("INSERT 0 90000", result)
        self.log.info("-----session2------------")
        connect_thread2.join(600)
        result = connect_thread2.get_result()
        self.log.info(result)
        self.assertIn("VACUUM", result)
        self.log.info("-----session3------------")
        connect_thread3.join(600)
        result = connect_thread3.get_result()
        self.log.info(result)
        self.assertIn(self.constant.CREATE_TABLE_SUCCESS, result)
        self.assertIn("INSERT 0 900", result)
        self.assertIn("MERGE 900", result)
        self.log.info("-----session4------------")
        connect_thread4.join(600)
        result = connect_thread4.get_result()
        self.log.info(result)
        self.assertIn(self.constant.CREATE_TABLE_SUCCESS, result)
        self.assertIn("TRUNCATE", result)
        self.assertIn("INSERT 0 900000", result)

        # Step 3a: both standbys must report replication in sync.
        self.log.info('----检查主备是否同步----')
        result = self.sta_1_sh.check_data_consistency()
        self.assertTrue(result)
        result = self.sta_2_sh.check_data_consistency()
        self.assertTrue(result)

        # Step 3b: table list (\d) and per-table row counts must agree
        # across primary and both standbys.
        self.log.info('----检查数据一致性----')
        nodes_tuple = (self.primary_user_node,
                       self.sta1_dbuser, self.sta2_dbuser)
        flag = self.com.check_data_sample_by_all(r'\d', *nodes_tuple)
        self.assertTrue(flag)

        table_dict = self.com.format_sql_result(
            self.primary_sh.executDbSql(r'\d'))
        table_name = table_dict.get('Name')
        for name in table_name:
            select_sql = f'select count(*) from {name};'
            flag = self.com.check_data_sample_by_all(select_sql,
                                                     *nodes_tuple)
            self.assertTrue(flag)

    def tearDown(self):
        """Drop all test tables after a grace period for in-flight work."""
        self.log.info("------------------删除表-----------------")
        # Wait out the 20s fault window / lagging sessions before cleanup.
        time.sleep(20)
        sql = f"drop table if exists {self.tb_name} cascade;" \
            f"drop table if exists {self.tb_name}1 cascade;" \
            f"drop table if exists {self.tb_name}2 cascade;"\
            f"drop table if exists {self.tb_name}3 cascade;"
        result = self.primary_sh.executDbSql(sql)
        self.log.info(result)
        self.log.info(f"-----{os.path.basename(__file__)} end-----")
