'''
public function
'''
import _thread
import os
import re
import subprocess
import time

from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro


class Common:
    def __init__(self, node_name='default'):
        '''
        Bind the helper to one node from conf/nodes.yml.
        :param node_name: node entry name; decides whether scripts run as
                          root or as the database install user (the deploy
                          user defaults to dbuser, see conf/nodes.yml)
        '''
        # Run scripts either as root or as the database install user.
        self.rootNode = Node(node=node_name)
        self.logger = Logger()
        self.Constant = Constant()
        # Install path of the cfe fault-injection (press) tool.
        self.CFE_PATH = macro.CFE_PATH
        # Install path of the newpress load tool.
        self.PRESS_PATH = macro.PRESS_PATH

    def console(self, msg):
        '''
        Log a key message wrapped in a highlighted banner.
        :param msg: message to log
        '''
        self.logger.info("=============== {} ===============".format(msg))

    def parsePidList(self, msg):
        '''
        Parse the pids (second whitespace-separated column) out of
        `ps -ef | grep xxx` style output.

        Example, ps -ef | grep gs_dump | grep -v grep:
            zxb      70503 70502  0 16:12 ?        00:00:00 bash -c              source /home/zxb/gaussdb.bashrc;
            zxb      70536 70503 91 16:12 ?        00:00:01 gs_dump -W *          -f /home/zxb/dump_bakup.sql -p
        returns: ['70503', '70536']

        :param msg: raw ps output, one process per line
        :return: list of pid strings, one per parseable line
        '''
        # BUG FIX: the old split(' ') + counter walk appended the empty
        # tokens that followed the pid when columns were separated by more
        # than one blank, and range(len - 1) skipped the last token.
        # split() collapses blank runs so the pid is simply field 1.
        pidList = []
        for line in msg.splitlines():
            fields = line.split()
            if len(fields) > 1:
                pidList.append(fields[1])
        return pidList

    def thread(self, body):
        '''
        Thread target: run a shell command on the node and log its output.
        :param body: shell command to execute
        :return: None
        '''
        res = self.rootNode.sh(body).result()
        self.console(res)

    def cpuPress(self):
        '''
        Inject a 100% CPU usage fault with the cfe press tool.
        :return: True when injection succeeded and sar confirms the load,
                 otherwise False
        '''
        # Query the host CPU count (last number of the lscpu output).
        body = r'''lscpu | awk '/CPU\(s\):/' | awk '!/NUMA/ {print $2}' &'''
        res = self.rootNode.sh(body).result()
        self.console(res)
        cpu_nums = 0
        try:
            cpu_nums = int(re.findall(r'\d{1,10}', res)[-1])
            self.console(cpu_nums)
        except Exception as e:
            self.console("Get cpu info failed. Error msg {}".format(e))
            return False
        cpu_id1, cpu_id2 = 0, cpu_nums - 1
        # Press CPUs [0, cpu_nums-1] to 100% usage.
        body1 = '''{}/cfe "inject rCPU_Overloadal (cpuid1, cpuid2, usage) values({}, {}, 100)" &'''.format(
            self.CFE_PATH,
            cpu_id1,
            cpu_id2)
        self.console(body1)
        self.console("Fault Injection(cpu full) Start")
        # BUG FIX: start_new_thread passes the tuple items as positional
        # arguments; self.thread(body) takes exactly one argument, so the
        # old ("Thread1", self, body1) tuple raised TypeError inside the
        # thread and the cfe command never ran.
        _thread.start_new_thread(self.thread, (body1,))
        time.sleep(5)
        self.console("Fault Injection(cpu full) End")
        # Verify the injection: sar CPU utilisation should reach ~100%.
        body2 = '''sar -u 1 5 | awk '{if(NR==9) print $3}' &'''
        res2 = self.rootNode.sh(body2).result()
        self.console(res2)
        try:
            cpu_occupied = float(re.findall(r'\d{1,2}\.\d{2}', res2)[0])
            if cpu_occupied < 90:
                self.console("Fault Injuection Failed. Please check your press tool")
                return False
        except Exception as e:
            self.console("Something wrong! Maybe lack cfe or sar tools. Error msg {}".format(e))
            return False
        return True

    def cpuPressClear(self):
        '''
        Clear the 100% CPU usage fault injected by cpuPress.
        :return: None
        '''
        body = '''{}/cfe "clean rCPU_Overloadal" &'''.format(self.CFE_PATH)
        self.console("Restore Environment Begin")
        res = self.rootNode.sh(body).result()
        self.console(res)
        self.console("Restore Environment Complete!")

    def memPress(self):
        '''
        Press memory usage up to 90% with the newpress tool.
        :return: bool, True when the injection succeeded
        '''
        # TODO(review): the press binary path is hard-coded; consider using
        # self.PRESS_PATH (macro.PRESS_PATH) instead.
        body = '''for i in {1..10};do /root/newpress/press.arm64 -m 90 & done &'''
        self.console("Inject 90% of memory start")
        res = self.rootNode.sh(body).result()
        self.console(res)
        time.sleep(5)
        # Verify the injection via /proc/meminfo (MemTotal / MemFree).
        body1 = '''cat /proc/meminfo | awk '/Mem/' '''
        res1 = self.rootNode.sh(body1).result()
        self.console(res1)
        try:
            re_body = re.match(r'(\w*:\s*\d*\s*\w*)\n(\w*:\s*\d*\s*\w*)', res1)
            mem_total = int(re.findall(r'\d{1,100}', re_body.group(1))[-1])
            mem_free = int(re.findall(r'\d{1,100}', re_body.group(2))[-1])
            self.console("MemTotal: {0} kB, MemFree: {1} kB".format(mem_total, mem_free))
        except Exception as e:
            self.console("Check Memory Failed when calculate memory occupied. Errot: {}".format(e))
            return False
        else:
            # BUG FIX: occupancy must be a percentage -- the old code
            # compared a 0..1 fraction against 76.5 (always "failed") and
            # used the invalid format spec '{.2f}' which raised at runtime.
            occupied = round((mem_total - mem_free) / mem_total * 100, 2)
            self.console("Mem Occupied is {:.2f}%".format(occupied))
            if occupied < 90 * 0.85:
                self.console("Inject Failed. Because Occupied < 90%")
                return False

        self.console("Inject 90% of memory complete")
        return True

    def memPressClear(self):
        '''
        Clear the memory pressure fault by killing every press process.
        :return: None
        '''
        self.console("Clear Memotry Fault Begin!")
        body = '''ps -ef | grep press | grep -v 'grep' | awk '{print $2}' | xargs kill -9'''
        res = self.rootNode.sh(body).result()
        self.console(res)
        self.console("Clear Memotry Fault Complete!")

    def startTPCC(self, db_node, tpcc_path, tpcc_command='. runBenchmark.sh props.pg'):
        '''
        Run the TPCC benchmark and return its result summary.
        :param db_node: node to run on, usually the root node
        :param tpcc_path: TPCC install path
        :param tpcc_command: TPCC start command
        :return: the matched summary section, or the full output when the
                 summary pattern does not match
        '''
        start_tpcc = f"source /etc/profile; cd {tpcc_path}; {tpcc_command}"
        self.console(start_tpcc)
        result = db_node.sh(start_tpcc).result()
        # Grab the block from 'Measured tpmC' through 'Transaction Count'.
        pattern = r'Measured tpmC \(NewOrders\).*Measured tpmTOTAL.*' \
                  r'Session Start.*Session End.*Transaction Count'
        regex_res = re.search(pattern, result, re.I | re.S)
        if regex_res:
            return regex_res.group()
        else:
            return result

    def createLargeNumTable(self, node, t_name, s_num=1, e_num=10, insert_sign=False, del_sign=False):
        '''
        Create table t_name and insert rows with ids in [s_num, e_num].
        :param node: node to run on, usually the dbuser node
        :param t_name: table name
        :param s_num: first id value
        :param e_num: last id value
        :param insert_sign: when True, only insert rows (table must exist)
        :param del_sign: when True, only drop the table
        :return: gsql output message
        '''
        sql = ""
        if insert_sign:
            sql += f"start transaction; insert into {t_name} values (generate_series({s_num},{e_num}),'aaa'); commit;"
        else:
            if del_sign:
                sql += f'''drop table if exists {t_name};'''
            else:
                # Full cycle: recreate the table, fill it, count the rows.
                sql += f'''
                        start transaction;
                        drop table if exists {t_name};
                        create table {t_name}(t_id integer, t_desc character(20));
                        insert into {t_name} values (generate_series({s_num},{e_num}),'aaa');
                        select count(*) from {t_name};
                        commit;
                        '''
        shell_cmd = f'''
                source {macro.DB_ENV_PATH};
                gsql -d {node.db_name} -p {node.db_port} -r -c "{sql}"
                '''
        self.console(shell_cmd)
        msg = node.sh(shell_cmd).result()
        return msg

    def fileFindString(self, file_path, content):
        '''
        文件中查找字符串
        :param file_path: 文件路径
        :param content: 字符串内容
        :return: 查找到则返回True，否则返回False
        '''
        # 注意这里的打开文件编码方式
        with open(file_path, "r", encoding='UTF-8') as fp:
            strr = fp.read()
            if strr.find(content) != -1:
                return True

    def getFileStringRows(self, node, file_path, content):
        '''
        Return every line of a remote file that matches a string, via sed.
        :param node: node to run on
        :param file_path: path of the file
        :param content: string to match
        :return: all matching lines as one shell output string
        '''
        sed_cmd = f"sed -n '/{content}/p' {file_path}"
        self.console(sed_cmd)
        return node.sh(sed_cmd).result()

    def replaceFileContentBySed(self, node, file_path, old_str, new_str):
        '''
        Replace a string in a remote file in place with sed.
        :param node: node to run on
        :param file_path: path of the file
        :param old_str: string to replace
        :param new_str: replacement string
        :return: shell output
        '''
        sed_cmd = f'''sed -i "s/{old_str}/{new_str}/g" {file_path}'''
        self.console(sed_cmd)
        return node.sh(sed_cmd).result()

    def replaceFileContent(self, file_path, old_str, new_str):
        '''
        替换文件内容，不可指定node
        :param file_path: 文件路径
        :param old_str: 原字符串
        :param new_str: 目标字符串
        '''
        file_data = ""
        with open(file_path, "r", encoding="utf-8") as f:
            for line in f:
                if old_str in line:
                    line = line.replace(old_str, new_str)
                file_data += line
        with open(file_path, "w", encoding="utf-8") as f:
            f.write(file_data)

    def recordFileNum(self, node, path):
        '''
        Count regular files directly under path (ls -l | grep "^-" | wc -l).
        :param node: node to run on
        :param path: directory path
        :return: shell output containing the count
        '''
        ls_cmd = 'ls -l ' + path + ' |grep "^-"|wc -l'
        self.console(ls_cmd)
        result = node.sh(ls_cmd).result()
        return result

    def getlatestPglog(self, node, logpath, dnnodename):
        '''
        Return the last whitespace-separated field of `ls -al` on the DN
        log directory.
        NOTE(review): rsplit(" ", 1) is applied to the whole multi-line
        listing, so this yields the file name on the last listed line --
        presumably the latest pg log; confirm the ls ordering on the node.
        '''
        showfile_cmd = "ls -al " + logpath + "/" + dnnodename
        showfile_msg = node.sh(showfile_cmd).result()
        self.console(showfile_msg)
        getlatestlog = showfile_msg.rsplit(" ", 1)
        return getlatestlog[1]

    # function: find the most recently modified audit log
    def findlastModifiedFile(self, path, filename, period, node):
        '''
        Find files named `filename` under `path` modified within the last
        `period` minutes and return the base name of the last match.
        :param period: minutes, passed to `find -mmin` (as a string)
        :return: base file name, or the raw find output when nothing matched
        '''
        find_cmd = 'find ' + path + ' -name \'' + filename + '\' -type f -mmin -' + period + ' -ls'
        self.console(find_cmd)
        result = node.sh(find_cmd).result()
        self.console(result)
        if len(result) > 0:
            # Last field of the -ls output is the full path; keep basename.
            tmp = result.rsplit(' ', 1)[1]
            result = tmp.rsplit('/', 1)[1]
        return result

    def getOrigfilesize(self, list_info, flag):
        '''
        Locate the first line in list_info containing `flag` and walk its
        space-split fields to return an index.
        NOTE(review): after split(' ') no element can equal ' ', so the
        else-branch never runs and `count` increments on every element; the
        returned index is therefore 4 whenever the line has at least 5
        elements, else 0. Confirm this matches what callers expect (it
        looks intended to find the size column of an `ls -l` line).
        :param list_info: list of text lines
        :param flag: substring to search for
        :return: field index (see note), or 0 when flag is not found
        '''
        result = [i for i, x in enumerate(list_info) if x.find(flag) != -1]
        if len(result) == 0:
            return 0
        else:
            self.console(result)
            self.console(list_info[result[0]])
            ori_size = list_info[result[0]].split(' ')
            self.console('-----' + str(ori_size))
            count = 0
            index = 0
            for i in range(len(ori_size)):
                if i >= len(ori_size):
                    break
                if ori_size[i] != ' ':
                    count = count + 1
                else:
                    pass
                if count == 5:
                    index = i
                    break
            return index

    def trans_num_to_alp(self, orgin_str):
        '''
        Map each digit in a string to a letter (1->a, 2->b, ..., 0->j).
        :param orgin_str: string to translate
        :return: the translated string
        '''
        digit_to_letter = str.maketrans('1234567890', 'abcdefghij')
        return orgin_str.translate(digit_to_letter)

    def kill_pid(self, node, sigterm):
        '''
        Find pids via lsof on the node's db port and send them a signal.
        :param node: node to run on
        :param sigterm: kill signal number, e.g. 9 to force-kill
        :return: output of the kill commands
        '''
        pid_list = []
        kill_cmd = ''
        cmd = f"lsof -i:{node.db_port}"
        self.console(cmd)
        msg = node.sh(cmd).result()
        self.console(msg)

        # Collect the PID column (2nd field) of every lsof data line.
        pid_lines = msg.strip().splitlines()
        for i in pid_lines:
            pid_line = i.strip().split()
            if pid_line[1].isnumeric():
                pid_list.append(pid_line[1])

        for pid in pid_list:
            kill_cmd += f"kill -{sigterm} {pid};"
        self.console(kill_cmd)
        msg = node.sh(kill_cmd).result()
        self.console(msg)
        return msg

    def mod_file_rows(self, node, file_path, start_row, end_row, contant):
        '''
        Replace the content of lines [start_row, end_row] in a file via sed.
        :param node: node to run on
        :param file_path: path of the file
        :param start_row: first line to change
        :param end_row: last line to change
        :param contant: replacement content
        :return: shell output
        '''
        sed_cmd = f"sed -i '{start_row},{end_row}c {contant}' {file_path};"
        self.console(sed_cmd)
        output = node.sh(sed_cmd).result()
        self.console(output)
        return output

    def file_sql_execute(self, node, sqlfile, count, db_name=None):
        '''
        Execute a sql script file `count` times with gsql.
        :param node: database node
        :param sqlfile: path of the sql file
        :param count: number of executions
        :param db_name: optional database name; defaults to node.db_name
        :return: None
        '''
        if db_name is None:
            db_name = node.db_name
        for i in range(count):
            filesql_cmd = f"source {macro.DB_ENV_PATH};gsql -d {db_name} -p {node.db_port}  -r -f {sqlfile}"
            msg = node.sh(filesql_cmd).result()
            self.console('sqlexecute result: ' + msg)

    def getAvailSize(self, node, path, command=''):
        '''
        Return the Available column of `df` for the disk holding path.
        Example output parsed:
            Filesystem     1K-blocks     Used Available Use% Mounted on
            /dev/xvde1     103080224 97837396         0 100% /openGauss
        :param node: node to run on
        :param path: file path
        :param command: extra df options, e.g. -h
        :return: the Available value, or '' when it cannot be parsed
        '''
        getsize_cmd = f'df {command} {path}'
        self.console(getsize_cmd)
        getsize_msg = node.sh(getsize_cmd).result()
        self.console(getsize_msg)
        avail_size = ''
        lines = getsize_msg.splitlines()
        if len(lines) > 1:
            fields = lines[1].split()
            if len(fields) > 4:
                # Column 4 (index 3) is 'Available'.
                avail_size = fields[3]
        return avail_size

    def scp_file(self, node, file_name, target_path, del_sign=False,
                 source_dir=None):
        """
        Copy a local file to target_path on the node's host, or delete the
        previously copied file when del_sign is True.
        :param node: target node
        :param file_name: file name
        :param target_path: destination directory on the target host
        :param del_sign: when True, delete the remote copy instead
        :param source_dir: source directory; defaults to macro.SCRIPTS_PATH
        :return: None
        """
        source_dir = source_dir if source_dir else macro.SCRIPTS_PATH
        filepath_1 = os.path.join(source_dir, file_name)
        self.console(filepath_1)
        newsql_path1 = os.path.join(target_path, file_name)

        if not del_sign:
            # Make sure the destination directory exists.
            mkdir_cmd = f'if [ ! -d {target_path} ];' \
                        f'then mkdir {target_path};fi'
            self.console(mkdir_cmd)
            msg = node.sh(mkdir_cmd).result()
            self.console(msg)

            self.console('filepath_1:' + filepath_1)
            self.console('newsql_path1:' + newsql_path1)

            # Remove a stale remote copy before transferring.
            cmd = f'ls {newsql_path1}'
            self.console(cmd)
            msg = node.sh(cmd).result()
            if self.Constant.NO_FILE_MSG not in msg:
                cmd = f'rm -rf {newsql_path1}'
                self.console(cmd)
                msg = node.sh(cmd).result()
                self.console(msg)
            node.scp_put(filepath_1, newsql_path1)
        else:
            # Delete the script file that was copied over earlier.
            rm_sqlfile = f'rm -rf {newsql_path1}'
            self.console(rm_sqlfile)
            msg = node.sh(rm_sqlfile).result()
            self.console(msg)

    def getfilesize(self, filename, node):
        '''
        Get the size field (5th column of `ls -al`) of a file on a node.
        NOTE(review): this method is shadowed by a later duplicate
        definition of getfilesize in this class; only the later one is
        effective at runtime.
        '''
        check_cmd = 'ls -al ' + filename
        result = node.sh(check_cmd).result()
        self.console(result)
        filesize = result.split()[4]
        self.console(filesize)
        return filesize

    def judge_tpcc_stop(self, db_node):
        '''
        Block until the runBenchmark (TPCC) process disappears, polling
        every 20 seconds.
        :param db_node: node running the benchmark
        :return: None
        '''
        result = db_node.sh('ps -ef| grep runBen').result()
        self.logger.info('runBenchmark pid is: ' + result)
        while result.find('runBenchmark') > -1:
            time.sleep(20)
            result = db_node.sh('ps -ef| grep runBen').result()
            self.logger.info(result)
            pass

    def get_queue(self, queue):
        output = []
        while not queue.empty():
            output.append(queue.get())
        result = ''.join('%s' % id for id in output)
        self.logger.info(result)
        return result

    def getPidFromCmd(self, msg):
        '''
        Parse the pid (second whitespace-separated column) from the second
        line of command output such as `ps -ef | grep xxx`.
        :param msg: raw command output
        :return: the pid string, or "" when it cannot be parsed
        '''
        msg_list = msg.splitlines()
        if len(msg_list) > 1:
            # BUG FIX: split() collapses blank runs, and the old
            # range(len - 1) walk skipped the last column.
            fields = msg_list[1].split()
            if len(fields) > 1:
                return fields[1]
        return ""

    def getPidFromGrep(self, msg):
        # NOTE(review): unfinished stub -- it splits the message into lines
        # but never uses the result and implicitly returns None.
        msgList = msg.splitlines()

    def getfilesize(self, filename, nodeinfo):
        '''
        Get the size field (5th column of `ls -al`) of a file on a node.
        :param filename: file path
        :param nodeinfo: node to run on
        :return: file size as a string
        '''
        listing = nodeinfo.sh('ls -al ' + filename).result()
        self.logger.info(listing)
        size_field = listing.split()[4]
        self.logger.info(size_field)
        return size_field

    def kill_pid_keyword(self, keyword, sigterm, usernode):
        '''
        Find processes matching a keyword with `ps ux` and send them a
        signal; retries the lookup up to 5 times when nothing matches.
        :param keyword: keyword to grep for (whole word)
        :param sigterm: kill signal number
        :param usernode: node to run on
        :return: output of the kill commands
        '''
        pid_list = []
        kill_cmd = ''
        pid = 'ps ux | grep -w \'{keyword}\' | grep -v grep'.format(keyword=keyword)
        pmsg = usernode.sh(pid).result()
        self.logger.info(pid)
        self.logger.info(pmsg)
        # Retry the lookup; note there is no sleep between attempts.
        for i in range(5):
            if keyword not in pmsg:
                self.logger.info('did not find pid, try again')
                pmsg = usernode.sh(pid).result()
                self.logger.info(pid)
                self.logger.info(pmsg)
            else:
                break

        # PID is the 2nd column of `ps ux` output.
        pid_lines = pmsg.strip().splitlines()
        for i in pid_lines:
            pid_line = i.strip().split()
            if pid_line[1].isnumeric():
                pid_list.append(pid_line[1])

        for pid in pid_list:
            kill_cmd += f"kill -{sigterm} {pid};"
        self.logger.info(kill_cmd)
        msg = usernode.sh(kill_cmd).result()
        self.logger.info(msg)
        return msg

    def format_sql_result(self, result_msg):
        '''
        Parse a gsql table output into {column name: [values...]}.
        :param result_msg: raw gsql query output
        :return: dict mapping each column header to its list of row values
        '''
        value_list = []
        header_line = ''
        result_dict = {}
        result_lines = result_msg.strip().splitlines()
        # flag stays True when no '-------' divider line was found.
        flag=True
        for line in result_lines:
            if '-------' in line:
                flag=False
                # Header is the line right above the divider; everything
                # below the divider is collected as row data.
                divider_index = result_lines.index(line)
                header_line = result_lines[divider_index - 1].strip()
                for i in range(divider_index + 1, len(result_lines)):
                    value_list.append(result_lines[i].strip())
        if flag:
            self.logger.info(result_lines)

        header_line = header_line.split('|')
        for i in range(len(header_line)):
            result_dict[f'{header_line[i].strip()}'] = []
            # Drop the trailing '(N rows)' summary line when present.
            if '(' in result_msg and 'row' in result_msg and ')' in result_msg:
                value_list_len = len(value_list) - 1
            else:
                value_list_len = len(value_list)
            for j in range(value_list_len):
                result_dict[f'{header_line[i].strip()}'].append(value_list[j].split('|')[i].strip())

        return result_dict

    def check_data_sample_by_all(self, sql_cmd, *nodes):
        '''
        Run the same query on every node and check the results all match.
        :param sql_cmd: query to run
        :param nodes: cluster nodes to compare
        :return: True when every node returned the same parsed result
        '''
        nodes_tuple = nodes
        result_dict = {}
        self.logger.info(sql_cmd)
        for node in nodes_tuple:
            shell_cmd = f"source {macro.DB_ENV_PATH}; gsql -d {node.db_name} -p {node.db_port} -r -c '{sql_cmd}'"
            msg = node.sh(shell_cmd).result()
            result_dict[node.node] = self.format_sql_result(msg)
        self.logger.info(result_dict)
        # Pairwise-compare adjacent results; all must be equal.
        for i in range(len(result_dict) - 1):
            if list(result_dict.values())[i] != list(result_dict.values())[i + 1]:
                return False
        return True

    def cfe_inject(self, node, command):
        '''
        Inject a fault with the cfe tool.
        :param node: database node
        :param command: cfe clause, e.g. 'rfile_full (diskname) values (/dev/sdc)'
        :return: shell output
        '''
        cmd = f'''{macro.CFE_PATH}/cfe 'inject {command}' '''
        self.logger.info(cmd)
        return node.sh(cmd).result()

    def cfe_query(self, node, command):
        '''
        Query a fault injected with the cfe tool.
        :param node: database node
        :param command: cfe clause, e.g. 'rfile_full'
        :return: shell output
        '''
        cmd = f'''{macro.CFE_PATH}/cfe 'query {command}' '''
        self.logger.info(cmd)
        return node.sh(cmd).result()

    def cfe_clean(self, node, command):
        '''
        Clean up a fault injected with the cfe tool.
        :param node: database node
        :param command: cfe clause, e.g. 'rfile_full where (filename=/dev/sdc)'
        :return: shell output
        '''
        cmd = f'''{macro.CFE_PATH}/cfe 'clean {command}' '''
        self.logger.info(cmd)
        return node.sh(cmd).result()

    def get_disk_name(self, file_name):
        '''
        function: get the filesystem (disk) name that holds a path
        :param: file_name file path
        :return: disk name, e.g. /dev/xvde1
        '''
        cmd = f"df -h {file_name}"
        self.logger.info(cmd)
        msg = self.rootNode.sh(cmd).result()
        self.logger.info(msg)
        # First column of the last df output line is the filesystem name.
        db_disk_name = msg.splitlines()[-1].split()[0].strip()
        self.logger.info(db_disk_name)
        return db_disk_name

    def get_node_num(self, node):
        """
        function: get the number of nodes in the database cluster
        :param node: any node of the cluster
        :return: node count
        """
        conf_path = os.path.join(macro.DB_INSTANCE_PATH,
                                 macro.DB_PG_CONFIG_NAME)
        shell_cmd = f"cat {conf_path} |grep 'pgxc_node_name'|" \
            f"cut -d '=' -f 2|cut -d '#' -f 1"
        self.logger.info(shell_cmd)
        msg = node.sh(shell_cmd).result()
        self.logger.info(msg)
        # NOTE(review): str.strip('dn_') strips the character set {d, n, _}
        # from both ends, not the literal prefix 'dn_'; this relies on the
        # pgxc_node_name format 'dn_xxx_yyy...' -- confirm for other formats.
        node_list = msg.strip('\'').strip('dn_').split('_')
        self.logger.info(node_list)
        node_num = len(node_list)
        return node_num

    def check_system_timezone(self, *nodes_tuple):
        """
        Check the system timezone files on every node and repair the wrong
        ones by copying from a node whose file is correct.
        :param nodes_tuple: cluster node names
        :return: True when all timezone files are (or were made) correct
        """
        self.logger.info(f'=============检查系统时区配置================')
        cmd = 'ls -l /etc/localtime'
        # grep reports this when the pattern matches inside a binary file.
        file_assert = 'Binary file (standard input) matches'
        right_asia_file = ''
        wrong_node_list = []
        right_node_idx = ''
        for node_idx in nodes_tuple:
            result = Node(node=node_idx).sh(cmd).result()
            self.logger.info(result)
            # /etc/localtime is a symlink; take its target path.
            time_file = result.split('->')[1].strip()
            check_cmd = f'cat {time_file} | grep CST'
            self.logger.info(check_cmd)
            result = Node(node=node_idx).sh(check_cmd).result()
            self.logger.info(result)
            if file_assert in result:
                    right_asia_file = time_file
                    right_node_idx = node_idx
                    self.logger.info(
                        f"right file is {right_asia_file} at "
                        f"{Node(node=right_node_idx).ssh_host}")
            else:
                wrong_node_list.append(node_idx)
        if len(wrong_node_list) != 0 and \
                len(wrong_node_list) < len(nodes_tuple):
            # At least one node has a correct timezone file.
            if 'Asia' not in right_asia_file:
                self.logger.info('!!! Time file is wrong!!!')
                return False
            else:
                for wrong_node in wrong_node_list:
                    if  right_node_idx not in nodes_tuple:
                        self.logger.info('!!!there is no right node!!!')
                        return False
                    # Copy the good timezone file over and re-link it.
                    scp_cmd = f"scp -r {Node(node=right_node_idx).ssh_user}" \
                        f"@{Node(node=right_node_idx).ssh_host}" \
                        f":{right_asia_file} " \
                        f"{os.path.dirname(right_asia_file)}"
                    self.logger.info(scp_cmd)
                    result = Node(node=wrong_node).sh(scp_cmd).result()
                    self.logger.info(result)
                    mod_cmd = f'ln -sf {right_asia_file} /etc/localtime'
                    self.logger.info(mod_cmd)
                    result = Node(
                        node=wrong_node).sh(mod_cmd).result()
                    self.logger.info(result)
        elif len(wrong_node_list) == len(nodes_tuple):
            self.logger.info("!!!all nodes' time file are wrong!!!")
            return False
        else:
            self.logger.info("all nodes' Asia file is right")

        # Second pass: verify the America/New_York zoneinfo file.
        wrong_node_list = []
        right_node_idx = 'No right'
        self.logger.info(f'================{wrong_node_list}')
        new_york = '/usr/share/zoneinfo/America/New_York'
        self.logger.info('=========检查America/New_York====================')
        cmd = f"cat {new_york}  | grep EDT"
        for node_idx in nodes_tuple:
            result = Node(node=node_idx).sh(cmd).result()
            self.logger.info(result)
            if file_assert not in result:
                wrong_node_list.append(node_idx)
            else:
                right_node_idx = node_idx
        if len(wrong_node_list) != 0 and \
                len(wrong_node_list) < len(nodes_tuple):
            # At least one node has a correct timezone file.
            for wrong_node in wrong_node_list:
                if 'No right' == right_node_idx:
                    self.logger.info('!!!there is no right node!!!')
                    return False
                scp_cmd = f"scp -r " \
                    f"{Node(node=right_node_idx).ssh_user}" \
                    f"@{Node(node=right_node_idx).ssh_host}:" \
                    f"{new_york} " \
                    f"{os.path.dirname(new_york)}"
                self.logger.info(scp_cmd)
                result = Node(node=wrong_node).sh(scp_cmd).result()
                self.logger.info(result)
        elif len(wrong_node_list) == len(nodes_tuple):
            self.logger.info("!!!all new_york time file are wrong!!!")
            return False
        else:
            self.logger.info("all nodes' new_york file is right")
        return True

    def get_sh_result(self, node, cmd):
        """
        Run a shell command on a node, logging the command and its output.
        :param node: node info
        :param cmd: command to run
        :return: the sh output
        """
        self.logger.info(cmd)
        output = node.sh(cmd).result()
        self.logger.info(output)
        return output

    def parse_sql_res_with_col_name(self, sql_res, isprint=True):
        """
        Parse a gsql query result into per-row dicts keyed by column name.
        :param sql_res: raw database query output
        :param isprint: whether to log the parsed rows
        :return: list of dicts; row_cols[0] maps every column name to the
                 value of the first row
        """
        # BUG FIX: dropped the needless `global db_dict`, which leaked the
        # per-row working dict into module scope.
        col_name = [j.strip() for j in sql_res.splitlines()[0].split('|')]
        # Capture everything between the dashed divider and '(N rows)'.
        pattern = r'-+?\n(.*)\(\d+\srows?\)'
        res = re.search(pattern, sql_res, re.M | re.S)
        rows = [[j.strip() for j in i.split('|')] for i in
                res.group(1).splitlines()]
        row_cols = [dict(zip(col_name, row)) for row in rows]
        if isprint:
            self.logger.info(row_cols)

        return row_cols

    def get_jdbc(self, db_type='postgres', target_path='.'):
        """
        Find the jdbc driver jar shipped with yat and copy it to a path.
        :param db_type: jdbc type, e.g. opengauss, postgres, mysql
        :param target_path: directory to copy the jar into
        :return: jar file name, or None when no jar was found
        """
        cmd = f"yat info|grep 'Yat home path'|awk -F ':' '{{print $2}}'"
        self.logger.info(cmd)
        # Runs locally via subprocess, not on a remote node.
        yat_home_path = subprocess.getoutput(cmd).strip()
        lib_path = os.path.join(yat_home_path, 'lib')
        self.logger.info(lib_path)
        cmd = f"ls {lib_path} | grep '{db_type}'| head -1"
        self.logger.info(cmd)
        jar_name = subprocess.getoutput(cmd)
        if target_path != '.':
            mkdir_cmd = f"mkdir -p {target_path}"
            self.logger.info(mkdir_cmd)
            res = subprocess.getoutput(mkdir_cmd)
            self.logger.info(res)
        if jar_name:
            cmd = f"cp {os.path.join(lib_path, jar_name)} {target_path}"
            self.logger.info(cmd)
            res = subprocess.getoutput(cmd)
            self.logger.info(res)
        else:
            jar_name = None
            self.logger.error(f"未找到{db_type}jar包")
        return jar_name

    def get_pid(self, node, process=''):
        """
        function: get the pid(s) of the target process via ps ux
        :param node: node to run on
        :param process: process name
        :return: pid output (possibly multiple lines)
        """
        # NOTE(review): inside this f-string '{{{{print$2}}}}' renders as
        # '{{print$2}}'; awk treats the doubled braces as a nested action
        # block, so column 2 is still printed -- but '{{print $2}}' looks
        # unintended compared to the usual '{print $2}'; confirm.
        pid = f"""ps ux | grep -v grep | \
            grep {process} | awk '{{{{print$2}}}}'"""
        self.logger.info(pid)
        res = node.sh(pid).result()
        return res

    def check_node_exists(self, node_name='dbuser'):
        """
        Check whether a node entry is configured in conf/node.yaml.
        :param node_name: node name to look up
        :return: True when the node exists, False otherwise
        """
        try:
            Node(node=node_name)
        except Exception as err:
            self.logger.info('远程主机node信息不存在!' + str(err))
            return False
        return True

    def parse_sql_query_res(self, sql_res, isprint=True):
        """
        Parse a gsql query result into row-major and column-major values.
        :param sql_res: raw database query output
        :param isprint: whether to log the parsing steps
        :return: (rows, cols) -- rows[0] is the first data row's values,
                 cols[0] is the first column's values (headers and summary
                 lines stripped)
        """
        # Capture everything between the dashed divider and '(N rows)'.
        match = re.search(r'-+?\n(.*)\(\d+\srows?\)', sql_res, re.M | re.S)
        rows = []
        for body_line in match.group(1).splitlines():
            rows.append([cell.strip() for cell in body_line.split('|')])
        cols = [list(column) for column in zip(*rows)]
        if isprint:
            self.logger.info(match.group(1))
            self.logger.info(rows)
            self.logger.info(cols)

        return rows, cols