'''
case Type :数据库系统
case Name ：主机短时间有大量事务提交，产生大量日志，超过主备间日志复制的速度，双机状态正常
Create At: 2020-07-01
owner : @peilinqian
description:1、查看主备状态正常2、主机进行大规模插入操作3、插入过程查询主备状态，备机查询功能正常4、插入完成，查询主备状态，查询主备数据一致
history：
created： 2020/7/1  created by @peilinqian
'''

import os
import unittest
import time
import threading
from yat.test import Node
from testcase.utils.CommonSH import *
from yat.test import macro

from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import *

from testcase.utils.Common import *


class MyTest(unittest.TestCase):
    """Reliability case: a burst of large transactions on the primary
    generates logs faster than they replicate; the cluster must stay
    normal, the standby must remain queryable, and primary/standby data
    must be consistent once the load finishes (see module docstring).
    """

    primary_sh = CommonSH('PrimaryDbUser')
    standby1_sh = CommonSH('Standby1DbUser')
    nodes_tuple = ('PrimaryDbUser', 'Standby1DbUser', 'Standby2DbUser')

    @RestartDbCluster(*nodes_tuple)
    def setUp(self):
        """Prepare logger and node handles, and stage the bulk-insert SQL
        script on the primary host."""
        self.log = Logger()
        self.log.info('------------Opengauss_Reliability_Dbsys_Case015.py start------------')
        self.DB_INSTANCE_PATH = macro.DB_INSTANCE_PATH
        self.primary_root_node = Node(node='PrimaryRoot')
        self.primary_user_node = Node(node='PrimaryDbUser')
        self.standby1_root_node = Node(node='Standby1Root')
        self.standby1_user_node = Node(node='Standby1DbUser')
        self.com = Common()
        self.Constant = Constant()
        # The original assigned self.sqlfile twice; only this second value
        # was ever used, so the dead first assignment is removed.
        self.sqlfile = 'reliability_dbsys014-01.sql'

        # Staging directory on the target host.
        self.to_path = os.path.dirname(macro.DB_INSTANCE_PATH) + '/test_script/'
        # Full path of the staged script on the target host.
        self.newsql_path = os.path.join(self.to_path, self.sqlfile)
        # Copy the SQL script to the primary host.
        self.com.scp_file(self.primary_root_node, self.sqlfile, self.to_path)

    def test_main(self):
        """Run concurrent bulk inserts on the primary while verifying that
        the cluster stays normal, the standby still answers queries, and
        the data is consistent after the load completes."""
        # Prepare test data: (re)create the target table.
        create_cmd = '''set synchronous_commit=on;
                    drop table if exists test_table;
                    create table test_table(id int,name varchar(20),name1 varchar(20),name2 varchar(20));'''
        self.log.info(create_cmd)
        create_msg = self.primary_sh.executDbSql(create_cmd)
        self.log.info(create_msg)

        # Start 5 worker threads, each executing the insert script 10 times.
        threads = []
        for _ in range(5):
            worker = threading.Thread(
                target=self.com.file_sql_execute,
                args=(self.primary_user_node, self.newsql_path, 10,))
            # Daemon threads so an aborted test run cannot hang the process.
            worker.daemon = True
            threads.append(worker)
        for worker in threads:
            worker.start()

        time.sleep(5)
        # While inserts are running, the cluster must still report normal.
        self.assertTrue(self.primary_sh.getDbClusterStatus('status'))
        self.log.info('compeletly done')

        # While inserts are running, the standby must still answer queries.
        # (The original queried the primary here, contradicting the
        # scenario described in the module docstring.)
        select_sql = 'select count(*) from test_table;'
        msg = self.standby1_sh.executDbSql(select_sql)
        self.log.info(msg)

        # Wait for ALL workers to finish. The original joined only the last
        # thread (via a `global task` loop-variable hack), so earlier
        # workers could still have been inserting during the checks below.
        for worker in threads:
            worker.join()

        # After the load, the cluster must still report normal.
        self.assertTrue(self.primary_sh.getDbClusterStatus('status'))

        # Verify the standby has caught up with replication.
        self.log.info('校验备机是否完成数据同步')
        flag = self.standby1_sh.check_data_consistency()
        self.assertTrue(flag)

        # Primary and standby must return identical row counts.
        select_sql = 'select count(*) from test_table;'
        nodes_tuple = (self.primary_user_node, self.standby1_user_node)
        flag = self.com.check_data_sample_by_all(select_sql, *nodes_tuple)
        self.assertTrue(flag)

    def tearDown(self):
        """Drop the test table and remove the staged SQL script."""
        drop_sql = 'drop table if exists test_table;'
        msg = self.primary_sh.executDbSql(drop_sql)
        self.log.info(msg)

        rm_sqlfile = 'rm -r {path}'.format(path=self.newsql_path)
        self.primary_user_node.sh(rm_sqlfile)
