#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import ast
import glob
import grp
import json
import os
import pwd
import random
import re
import socket
import sqlite3
import time
from datetime import datetime

import psutil

from common.common import check_path_legal, execute_cmd, output_execution_result, execute_cmd_list, \
    check_command_injection, retry_when_exception
from common.common_models import SubJobDetails, LogDetail
from common.const import CMDResult, IPConstant, SysData, SubJobStatusEnum
from common.util.exec_utils import exec_overwrite_file
from tpops.gaussdb.common.const import RoachConstant, RpcParamKey, GaussDBPath, JobInfo, GaussSubJobName, ErrorCode, \
    LogLevel, UserInfo, PermissionNode, InstanceStatus, VERSION
from tpops.gaussdb.common.log import log


def get_std_in_variable(str_env_variable: str):
    """Look up *str_env_variable* in the JSON parameters read from stdin; '' when absent or falsy."""
    stdin_params = json.loads(SysData.SYS_STDIN)
    value = stdin_params.get(str_env_variable)
    return value if value else ''


def get_input_dict_in_variable():
    """Parse and return the whole stdin parameter dict."""
    return json.loads(SysData.SYS_STDIN)


def report_job_details(job_id: str, sub_job_details: dict):
    """Report sub-job progress through the rpc tool; True only when it answered with SUCCESS."""
    try:
        rpc_result = exec_rc_tool_cmd(job_id, "ReportJobDetails", sub_job_details)
    except Exception as err:
        log.error(f"Invoke rpc_tool interface exception, err: {err}.")
        return False
    if not rpc_result:
        return False
    ret_code = rpc_result.get("code", -1)
    if ret_code == int(CMDResult.SUCCESS):
        return True
    log.error(f"Invoke rpc_tool interface failed, result code: {ret_code}.")
    return False


def exec_rc_tool_cmd(unique_id, interface_name, param_dict):
    """
    Execute an rpc_tool command through temporary input/output files.

    :param unique_id: unique prefix for the temporary file names
    :param interface_name: rpc_tool interface to invoke
    :param param_dict: parameters serialized into the input file
    :return: dict parsed from the output file, or {} when the command failed
    """

    def clear_file(path):
        # Only remove regular files that live under the allowed plugin directory.
        ret = check_path_legal(path, GaussDBPath.GaussDB_LINK_PATH)
        if not ret:
            return
        if os.path.isfile(path):
            os.remove(path)

    cur_time = str(int((time.time())))
    unique_id = unique_id + interface_name + cur_time
    input_file_path = os.path.join(RpcParamKey.PARAM_FILE_PATH, RpcParamKey.INPUT_FILE_PREFFIX + unique_id)
    output_file_path = os.path.join(RpcParamKey.RESULT_PATH, RpcParamKey.OUTPUT_FILE_PREFFIX + unique_id)
    exec_overwrite_file(input_file_path, param_dict)

    cmd = f"sh {RpcParamKey.RPC_TOOL} {interface_name} {input_file_path} {output_file_path}"
    try:
        # stdout/stderr of the wrapper script are not used; only the return code matters.
        ret, _, _ = execute_cmd(cmd)
        with open(output_file_path, "r", encoding='utf-8') as tmp:
            result = json.load(tmp)
    finally:
        # Remove the temporary input/output files whether or not the call succeeded.
        clear_file(input_file_path)
        clear_file(output_file_path)
    if ret != CMDResult.SUCCESS:
        return {}
    return result


def is_valid_port(port):
    r"""
    Check that *port* is a decimal string holding a valid port number (1-65535).

    The previous pattern ``^[1-9]\d{0,3}$`` wrongly rejected 10000-65535 even
    though the accompanying comment stated the full 1-65535 range, so the
    numeric bound is now checked explicitly.

    :param port: port number as a string (no leading zeros)
    :return: bool
    """
    if not re.fullmatch(r'[1-9]\d{0,4}', port):
        return False
    return 1 <= int(port) <= 65535


def is_valid_ip(ip):
    """
    Validate an IPv4 address or a comma-separated list of IPv4 addresses.

    The previous regex only limited each octet to three digits, so values like
    999.999.999.999 passed; every octet is now also range-checked to 0-255.
    The total string length is still bounded by string_verify.

    :param ip: one dotted quad or several, separated by commas (spaces allowed)
    :return: bool
    """
    if not string_verify(ip):
        return False
    if not re.match(r'^(?:\d{1,3}\.){3}\d{1,3}(?:\s*,\s*(?:\d{1,3}\.){3}\d{1,3})*$', ip):
        return False
    # The regex only constrains digit counts; verify each octet's numeric range.
    for address in re.split(r'\s*,\s*', ip.strip()):
        if any(int(octet) > 255 for octet in address.split('.')):
            return False
    return True


def string_verify(target_str):
    """Return True when the externally supplied string is within the configured length cap."""
    return len(target_str) <= RpcParamKey.MAX_TARGET_LENGTH


def extract_ip():
    """
    Collect every non-loopback IPv4 address configured on this host.
    :return: list of ip strings; empty when the interface query fails
    """
    log.info(f"Start getting all local ips ...")
    collected = []
    try:
        addr_map = psutil.net_if_addrs()
    except Exception as err:
        log.error(f"Get ip address err: {err}.")
        return collected
    for addr_list in addr_map.values():
        collected.extend(
            addr[1] for addr in addr_list
            if addr[0] == RoachConstant.ADDRESS_FAMILY_AF_INET and addr[1] != IPConstant.LOCAL_HOST
        )
    log.info(f"Get all local ips success.")
    return collected


def set_restore_name():
    """Build a restore task name stamped with the current local time."""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    return f'Restorename_{stamp}'


def set_backup_name():
    """Build a backup task name stamped with the current local time."""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    return f'Backupname_{stamp}'


def process_exists(process_name: str):
    """Return True when some running process has exactly the given name."""
    return any(proc.name() == process_name for proc in psutil.process_iter())


def check_agent_roach_alive():
    """Check via ps/grep that the roach backup agent process is currently running."""
    check_cmd = f'ps -ef | grep {RoachConstant.PROCESS_NAME} | grep -v grep'
    return_code, _, err_str = execute_cmd(check_cmd)
    if return_code == CMDResult.SUCCESS:
        return True
    log.error(f'BackUpAgent is not alive {err_str}')
    return False


def get_process_info(location):
    """
    Extract one whitespace-separated field from the backUpAgent command line.

    :param location: 1-based awk field index to pull from the process cmdline
    :return: the field value (text after '=' when present), or "" on failure
    :raises Exception: when no backUpAgent process can be found
    """
    log.info("start to query roach")
    return_code, stdout, err_str = execute_cmd('pgrep -f "backUpAgent.*host.*port"')
    if return_code != CMDResult.SUCCESS:
        log.error(f'Get process info failed! return_code:{err_str}')
        raise Exception("Get process info failed!")
    pids = stdout.split()
    if not pids:
        log.error(f'No backupAgent pid Found')
        raise Exception("No backupAgent pid Found")
    first_pid = pids[0]
    # ps prints a header line, hence the sed '2p' to take the actual cmdline row.
    field_cmd_list = [
        f"ps -p {first_pid} -o cmd", "sed -n '2p'", f"awk '{{print ${location}}}'"
    ]
    return_code, stdout, err_str = execute_cmd_list(field_cmd_list)
    if return_code != CMDResult.SUCCESS:
        log.error(f'Get process info failed! return_code:{err_str}')
        return ""
    stdout = stdout.split("\n")[0] if "\n" in stdout else stdout
    if "=" in stdout:
        stdout = stdout.split("=")[1].strip()
    log.info(f"success to get_process_info {stdout}")
    return stdout


@retry_when_exception(retry_times=5, delay=random.randint(15, 30))
def get_agent_roach_host_and_port():
    """Read the backUpAgent listen host and port from its command line; retried on failure."""
    log.info(f"step2-3-1 start sub_job_roach host_port")
    host = get_process_info(RoachConstant.BACKUP_AGENT_IP_POS)
    port = get_process_info(RoachConstant.BACKUP_AGENT_PORT_POS)
    if host and port:
        return host, port
    raise Exception("backUpAgent is not start")


def check_path_valid(input_path, parent_dir="/mnt/databackup/"):
    """
    Resolve *input_path* and confirm it stays inside *parent_dir* (prevents path escape).

    :param input_path: path to validate
    :param parent_dir: required ancestor directory
    :return: bool
    """
    both_strings = isinstance(input_path, str) and isinstance(parent_dir, str)
    if not (input_path and parent_dir and both_strings):
        return False
    return check_path_legal(input_path, parent_dir)


def read_file(path):
    """Load and return the JSON content stored at *path*."""
    with open(path, "r", encoding='utf-8') as fp:
        parsed = json.load(fp)
    log.debug(f"Read progress end")
    return parsed


def get_hostname():
    """Return this machine's hostname."""
    return socket.gethostname()


def invoke_rpc_tool_interface(unique_id: str, interface_name: str, param_dict: dict):
    """
    Invoke an rpc_tool interface through temporary input/output files.

    :param unique_id: unique prefix for the temporary file names
    :param interface_name: rpc_tool interface to invoke
    :param param_dict: parameters serialized into the input file
    :return: dict parsed from the output file, or {} when the command failed
    """

    def clear_file(path):
        log.info(f"try to delete file {path}")
        if os.path.isfile(path):
            log.info(f"start to delete {path}")
            os.remove(path)

    cur_time = str(int((time.time())))
    unique_id = unique_id + interface_name + cur_time
    input_file_path = os.path.join(RpcParamKey.PARAM_FILE_PATH, RpcParamKey.INPUT_FILE_PREFFIX + unique_id)
    output_file_path = os.path.join(RpcParamKey.RESULT_PATH, RpcParamKey.OUTPUT_FILE_PREFFIX + unique_id)
    output_execution_result(input_file_path, param_dict)

    cmd = f"sh {RpcParamKey.RPC_TOOL} {interface_name} {input_file_path} {output_file_path}"
    try:
        # stdout/stderr of the wrapper script are not used; only the return code matters.
        ret, _, _ = execute_cmd(cmd)
    finally:
        # The input file is no longer needed once the command has run (or failed).
        clear_file(input_file_path)
    if ret != CMDResult.SUCCESS:
        return {}

    # Read the result, then delete the output file even when parsing fails.
    try:
        with open(output_file_path, "r", encoding='utf-8') as tmp:
            result = json.load(tmp)
    finally:
        clear_file(output_file_path)
    return result


def aggregate_single_copy_object_data(cache_path, object_data_path, repo_list, is_skip_same_append_file):
    """
    Merge every object-data db file of one copy into a single backupkey.db.

    :param cache_path: directory where the merged db is created
    :param object_data_path: directory containing the per-host source db files
    :param repo_list: repository descriptions used to resolve file-system mount paths
    :param is_skip_same_append_file: when False, same-name files are append-merged
    :return: True on success, False otherwise
    """
    concrete_object_db = "backupkey.db"
    log.info(f"Start to merge db in {object_data_path} to {cache_path}")
    # Refuse to follow a pre-planted symlink at the target location.
    if os.path.islink(os.path.join(cache_path, concrete_object_db)):
        os.remove(os.path.join(cache_path, concrete_object_db))

    try:
        object_conn = sqlite3.connect(os.path.join(cache_path, concrete_object_db))
    except Exception as ex:
        log.error(f"Connect sqlite {concrete_object_db} failed for {ex}.")
        return False
    object_cur = object_conn.cursor()
    if not object_conn or not object_cur:
        log.error(f"Connect sqlite {concrete_object_db} failed.")
        return False
    object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
    if not object_tables:
        create_xbsa_db(object_cur)
    object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
    if not object_tables or len(object_tables) == 0:
        log.error(f"Create dws table failed.")
        # Fix: close the connection instead of leaking it on this failure path.
        object_conn.close()
        return False

    db_file_list = get_all_db_files(object_data_path)
    log.info(f"get db_file_list {db_file_list}")
    if not db_file_list:
        log.error("No db file")
        # Fix: close the connection instead of leaking it on this failure path.
        object_conn.close()
        return False
    str_line_list = []
    close_resource_params = {
        'object_cur': object_cur,
        'object_conn': object_conn
    }
    build_str_line_list(db_file_list, close_resource_params, is_skip_same_append_file, repo_list, str_line_list)
    statement = "insert into BsaObjTable values ({})".format(','.join(['?'] * 20))
    execute_sql_with_retries(statement, str_line_list, close_resource_params, 'insert')
    object_conn.commit()
    # close_resource also resets owner/permissions on the merged db file.
    close_resource(cache_path, concrete_object_db, object_conn, object_cur)
    return True


def build_str_line_list(db_file_list, close_resource_params, is_skip_same_append_file, repo_list, str_line_list):
    """
    Read BsaObjTable rows from every source db and append the rows to keep
    (after dedup / append-merge handling) onto *str_line_list* as tuples.

    :param db_file_list: db files to read rows from
    :param close_resource_params: dict with the target 'object_cur'/'object_conn'
    :param is_skip_same_append_file: when False, same-name files are append-merged
    :param repo_list: repository descriptions used to resolve mount paths
    :param str_line_list: output list mutated in place
    """
    for db_file in db_file_list:
        # Fix: keep a handle on the connection so it can be closed (it was
        # previously created anonymously and leaked until garbage collection).
        temp_conn = sqlite3.connect(db_file)
        try:
            temp_object_cur = temp_conn.cursor()
            for line in temp_object_cur.execute("select * from BsaObjTable").fetchall():
                cur_object_name = line[2]
                # Same-name files are overwrite-written: the new one replaces the old.
                # NOTE(review): objectName is interpolated into the SQL text; the values
                # come from our own backup db, but a parameterized query would be safer.
                query_sql = f"select * from BsaObjTable where objectName = '{cur_object_name}'"
                query_result = execute_sql_with_retries(query_sql, [], close_resource_params, 'query')
                # Duplicate file, overwrite-write case: keep the already-inserted row.
                if query_result and 'global_barrier_records/hadr_barrier_' not in cur_object_name:
                    log.warning(f"file is update, skip the old file")
                    continue
                # Duplicate file, append-write case: merge file contents instead.
                if query_result and not is_skip_same_append_file:
                    merge_append_same_file(line, query_result, repo_list)
                    continue

                str_line = str(line).replace("None", "''")
                tuple_str_line = ast.literal_eval(str_line)
                str_line_list.append(tuple_str_line)
        finally:
            temp_conn.close()


def merge_time_body_info(time_body_infos):
    """
    Coalesce overlapping or adjacent intervals drawn from a list of interval lists.

    :param time_body_infos: list of lists of [start, end] intervals
    :return: sorted list of disjoint [start, end] intervals; intervals that
             overlap or touch (prev end + 1 == next start) are merged

    Examples:
        [[[10, 20], [21, 24]], [[23, 26]]] -> [[10, 26]]
        [[[10, 20]], [[25, 26]]]           -> [[10, 20], [25, 26]]
    """
    # Flatten the nested interval lists into one list, then order by start time.
    all_intervals = []
    for sublist in time_body_infos:
        all_intervals.extend(sublist)
    all_intervals.sort(key=lambda pair: pair[0])

    merged = []
    for current in all_intervals:
        if merged and current[0] <= merged[-1][1] + 1:
            # Overlapping or adjacent: stretch the previous interval in place.
            merged[-1][1] = max(merged[-1][1], current[1])
        else:
            merged.append(current)
    return merged


def execute_sql_with_retries(statement, str_line_list, close_resource_params, sql_type, max_retries=3):
    """
    Run a SQL statement with up to *max_retries* attempts, 5 seconds apart.

    :param statement: SQL text; 'query' statements are fetched, anything else is
                      bulk-executed with executemany inside an exclusive transaction
    :param str_line_list: parameter tuples for the non-query path
    :param close_resource_params: dict carrying 'object_cur' and 'object_conn'
    :param sql_type: 'query' for reads, any other value for bulk insert
    :param max_retries: attempts before giving up
    :return: fetched rows for queries, otherwise None (commit is left to the caller)
    :raises Exception: when every attempt fails
    """
    query_result = None
    object_cur = close_resource_params.get('object_cur', None)
    object_conn = close_resource_params.get('object_conn', None)
    for attempt in range(max_retries):
        try:
            if sql_type == 'query':
                query_result = object_cur.execute(statement).fetchall()
            else:
                # Open an exclusive transaction before the bulk insert.
                object_cur.execute("begin exclusive transaction")
                object_cur.executemany(statement, str_line_list)
        except Exception as e:
            object_conn.rollback()
            if attempt >= max_retries - 1:
                log.error(f"After {max_retries} retries, SQL still cannot be executed: {e}")
                raise Exception(f"Exec execute_sql_with_retries error.")
            log.warning(f"Error executing SQL: {e}, retry {attempt + 1}...")
            time.sleep(5)
        else:
            break
    return query_result


def close_resource(cache_path, concrete_object_db, object_conn, object_cur):
    """Close the sqlite handles, then lock down the merged db file's owner and mode."""
    log.info("end to read line from source")
    object_cur.close()
    object_conn.close()
    merged_db_path = os.path.join(cache_path, concrete_object_db)
    if not os.path.isfile(merged_db_path):
        return
    set_user_and_group(merged_db_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
    set_permisson(merged_db_path, PermissionNode.PERMISSION_700)


def merge_append_same_file(line, query_result, repo_list):
    """Append the old row's file content onto the new row's file (duplicate object name).

    Rows are BsaObjTable tuples: index 14 is the store path, 15 the filesystem name.
    """
    new_row = query_result[0]
    old_file_system = line[15]
    new_file_system = new_row[15]
    log.info(f"Merge append file, get old system {old_file_system}, get new system {new_file_system}")
    new_file_path = get_mount_path(new_file_system, repo_list) + new_row[14]
    old_file_path = get_mount_path(old_file_system, repo_list) + line[14]
    merge_append_file(old_file_path, new_file_path)


def merge_append_file(old_path, new_path):
    """Overwrite *new_path* with the old file's content followed by the new file's content."""
    log.info(f"Merge append file, get old path: {old_path}, get new_path: {new_path}")
    with open(old_path, 'r') as old_fp, open(new_path, 'r') as new_fp:
        combined = old_fp.read() + new_fp.read()

    exec_overwrite_file(new_path, combined, json_flag=False)


def get_mount_path(file_system_name, repo_list):
    """
    Resolve the mount path of *file_system_name* from the repository list.

    Walks every repo's filesystems; returns '' when nothing matches, and the
    last matching entry's first mountPath otherwise.
    """
    log.info(f"get repo list {repo_list}")
    mount_path = ""
    for repo in repo_list:
        for filesystem in repo['filesystems']:
            if filesystem['name'] != file_system_name:
                continue
            mount_path = filesystem['mountPath'][0]
    return mount_path


def create_xbsa_db(object_cur):
    """Create the 20-column BsaObjTable schema on the given sqlite cursor."""
    create_sql = (
        "CREATE TABLE [BsaObjTable] ([copyId] VARCHAR(100) NOT NULL,"
        "[objectSpaceName] VARCHAR(1024),[objectName] VARCHAR(1024) NOT NULL,"
        "[bsaObjectOwner] VARCHAR(64),"
        "[appObjectOwner] VARCHAR(64),[copyType] INTEGER(8),"
        "[estimatedSize] VARCHAR(100) NOT NULL,"
        "[resourceType] VARCHAR(32),[objectType] INTEGER(8),[objectStatus] INTEGER(8),"
        "[objectDescription] VARCHAR(100),[objectInfo] VARCHAR(256),[timestamp] VARCHAR(64),"
        "[restoreOrder] VARCHAR(100),[storePath] VARCHAR(1280) NOT NULL,"
        "[filesystemName] VARCHAR(256) NOT NULL,[filesystemId] VARCHAR(128) NOT NULL,"
        "[filesystemDeviceId] VARCHAR(256) NOT NULL,[rsv1] VARCHAR(256),[rsv2] VARCHAR(256));"
    )
    object_cur.execute(create_sql)


def get_all_db_files(object_data_path):
    """
    Collect every *.db file exactly one directory level below *object_data_path*.

    :param object_data_path: root directory holding one subdirectory per host
    :return: list of db file paths
    """
    db_file_list = []
    for host_key_path in os.listdir(object_data_path):
        db_path = os.path.join(object_data_path, host_key_path)
        has_db = os.path.isdir(db_path) and glob.glob(os.path.join(db_path, "*.db"))
        if not has_db:
            log.warn(f"There is no object data in metadata path {db_path}.")
            continue

        db_file_list.extend(
            os.path.join(db_path, database)
            for database in os.listdir(db_path)
            if database.endswith(".db")
        )
    return db_file_list


def get_file_attribute(file_name, job_id):
    """Return the (uid, gid) owning *file_name*; *job_id* is only used for log correlation."""
    stat_info = os.stat(file_name)
    log.info(f"Get user{stat_info.st_uid} group({stat_info.st_gid}) success. "
             f"main task:{job_id}")
    return stat_info.st_uid, stat_info.st_gid


def translate_date(date_str):
    """
    Convert a 'YYYY-MM-DDTHH:MM:SS+hhmm' string into an epoch timestamp.

    :param date_str: date string with a positive 4-digit UTC offset suffix
    :return: int timestamp (local mktime minus the offset), or 0 on format mismatch
    """
    pattern = r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+\d{4}$'
    if re.match(pattern, date_str):
        log.info("Date can be translate to timestamp")
    else:
        return 0

    date_obj = datetime.strptime(date_str[:-5], '%Y-%m-%dT%H:%M:%S')
    # The offset suffix is '+hhmm': hours sit at [-4:-2], minutes at [-2:].
    # (The previous slice [-5:-3] included the '+' sign, so e.g. '+0800'
    # parsed its hour part as int('+0') == 0 and the offset was wrong.)
    utc_offset = int(date_str[-4:-2]) * 3600 + int(date_str[-2:]) * 60
    trans_time = int(time.mktime(date_obj.timetuple())) - utc_offset
    return trans_time


def generate_timediff_size(backup_info):
    """
    Derive (duration_seconds, size_in_bytes) from a backup-info dict.

    Returns (0, 0) when any of begin_time/size/end_time is missing or a
    timestamp cannot be parsed; otherwise size (KB) is scaled by 1024.
    """
    required_keys = ("begin_time", "size", "end_time")
    if any(key not in backup_info for key in required_keys):
        return 0, 0
    begin_time = translate_date(backup_info["begin_time"])
    end_time = translate_date(backup_info["end_time"])

    if not begin_time or not end_time:
        return 0, 0

    size = backup_info["size"]

    log.info(f"generate_timediff_size, size {size}, begin_time {begin_time}, end_time {end_time}")
    return end_time - begin_time, size * 1024


def write_progress_file_with_status_and_speed(job_info: JobInfo, task_status, progress, should_upload_speed,
                                              backup_info=None):
    """
    Persist a SubJobDetails progress snapshot for the given sub-job.

    :param job_info: task context (ids, cache path, copy id)
    :param task_status: SubJobStatusEnum value to report
    :param progress: progress percentage
    :param should_upload_speed: when True, size/speed are derived from the time file
    :param backup_info: optional backup metadata carrying begin/end time and size
    """
    log_detail = build_log_detail(job_info, task_status)

    time_file = os.path.join(job_info.cache_path, f'T{job_info.job_id}')
    # Prefer sizes carried in backup_info, fall back to the time-file based
    # computation, and finally to zeros.
    if backup_info and check_backup_info(backup_info):
        timediff, size = generate_timediff_size(backup_info)
        if timediff > 0:
            speed = size / timediff
        else:
            # backup_info held an unusable time span; recompute from the cache files.
            size, speed = query_size_and_speed(job_info.cache_path, job_info.job_id, job_info.copy_id)
    elif should_upload_speed and os.path.exists(time_file):
        size, speed = query_size_and_speed(job_info.cache_path, job_info.job_id, job_info.copy_id)
    else:
        size, speed = 0, 0
    log.info(f"write_progress_file_with_speed, size {size}, get speed {speed}")
    progress_str = SubJobDetails(taskId=job_info.job_id,
                                 subTaskId=job_info.sub_job_id,
                                 taskStatus=task_status,
                                 progress=progress,
                                 dataSize=size,
                                 speed=speed,
                                 logDetail=log_detail)
    log.info(f"write_progress_file_with_speed progress_str: {progress_str}")
    json_str = progress_str.dict(by_alias=True)
    progress_file = os.path.join(job_info.cache_path, f"progress_{job_info.job_id}_{job_info.sub_job_id}")
    exec_overwrite_file(progress_file, json_str)


def check_backup_info(backup_info):
    """True when *backup_info* is a non-None mapping carrying a non-None 'size' entry."""
    return backup_info is not None and backup_info.get("size", None) is not None


def build_log_detail(job_info: JobInfo, task_status):
    """Build the LogDetail list matching the sub-job's final status ([] for other statuses)."""
    if task_status == SubJobStatusEnum.FAILED.value:
        log_detail_param = []
        if job_info.sub_job_type == GaussSubJobName.SUB_EXEC:
            job_info.error_code = ErrorCode.ERR_BACKUP_RESTORE
            log_detail_param.append(job_info.instance_id)
        return set_log_detail_with_params("plugin_task_subjob_fail_label", job_info.sub_job_id,
                                          job_info.error_code,
                                          log_detail_param,
                                          LogLevel.ERROR.value)
    if task_status == SubJobStatusEnum.COMPLETED.value:
        return set_log_detail_with_params("plugin_task_subjob_success_label",
                                          job_info.sub_job_id, 0, [],
                                          LogLevel.INFO.value)
    return []


def set_log_detail_with_params(log_label, sub_job_id, err_code=None, log_detail_param=None,
                               log_level=LogLevel.INFO.value):
    """Wrap one LogDetail record (built from label, code and params) in a single-element list."""
    detail = LogDetail(logInfo=log_label,
                       logInfoParam=[sub_job_id],
                       logTimestamp=int(time.time()),
                       logDetail=err_code,
                       logDetailParam=log_detail_param,
                       logLevel=log_level)
    return [detail]


def query_size_and_speed(cache_area, job_id, copy_id):
    """
    Compute (total_size, speed) for a running job from its time file and the
    per-host speed files; both are 0 when anything is missing or invalid.
    """
    size, speed = 0, 0
    time_file = os.path.join(cache_area, f'T{job_id}')
    # Refuse symlinked time files and bail out when the file is absent.
    if os.path.islink(time_file):
        log.error(f"Link file:{time_file},stop writing.")
        return size, speed
    if not os.path.exists(time_file):
        log.error(f"Time file: {time_file} not exist")
        return size, speed
    with open(time_file, "r", encoding='UTF-8') as time_fp:
        start_time = time_fp.read().strip()

    # Sum the already-backed-up data volume (MB), then scale it.
    total_data_size = get_total_data_size(cache_area, copy_id)
    if total_data_size == 0:
        return size, speed
    total_data_size *= 1024
    new_time = int((time.time()))
    timediff = new_time - int(start_time)
    if timediff == 0:
        log.info(f"query_size_and_speed, timediff is {timediff}")
        return size, speed
    try:
        speed = total_data_size / timediff
    except Exception as err:
        log.error(f"calculate speed failed, error is {err}")
    log.info(f"query_size_and_speed, datadiff: {total_data_size}, timediff: {timediff}, speed: {speed}, "
             f"start_time: {start_time}")
    return total_data_size, speed


def get_total_data_size(cache_area, copy_id):
    """Sum backed-up data sizes (MB) across every host directory under the copy's speed dir."""
    speed_dir_path = os.path.join(cache_area, "tmp", copy_id, "speed")
    if not os.path.isdir(speed_dir_path):
        return 0
    total_data_size = 0
    for item in os.listdir(speed_dir_path):
        host_key_path = os.path.join(speed_dir_path, item)
        if os.path.isdir(host_key_path):
            total_data_size = scan_speed_dir_path(host_key_path, total_data_size)
    return total_data_size


def scan_speed_dir_path(host_key_path, total_data_size):
    """Accumulate 'totalSizeInMB' from every xbsa_speed* JSON file in the directory."""
    for speed_item in os.listdir(host_key_path):
        speed_txt_file = os.path.join(host_key_path, speed_item)
        if not os.path.basename(speed_txt_file).startswith("xbsa_speed"):
            continue
        try:
            with open(speed_txt_file, "r", encoding='utf-8') as tmp:
                speed_obj = json.load(tmp)
        except Exception as err:
            log.error(f"scan_speed_dir_path, err: {err}.")
            continue
        # Unparseable or empty files contribute nothing.
        if speed_obj:
            total_data_size += speed_obj.get("totalSizeInMB", 0)
    return total_data_size


def check_path_in_white_list(path_: str):
    """
    Resolve *path_* and accept it only when it starts with a trustlisted prefix.

    :return: (True, real_path) when accepted, otherwise (False, '')
    """
    try:
        real_path = os.path.realpath(path_)
    except Exception as e:
        log.error("Path verification error.")
        return False, ''
    if check_command_injection(real_path):
        log.error("Invalid path.")
        return False, ''
    for allowed_prefix in RoachConstant.WHITE_FILE_LIST:
        if real_path.startswith(allowed_prefix):
            return True, real_path
    log.error("The path is not in the trustlist.")
    return False, ''


def get_uid_and_gid(user_name, group_name):
    """
    Resolve *user_name* to a uid and *group_name* to a gid.

    The group is now looked up in the group database (grp.getgrnam); the old
    code resolved group_name through pwd, i.e. it returned the primary gid of
    a *user* with that name and raised for names that are only groups.

    :raises KeyError: when either name does not exist
    """
    return pwd.getpwnam(user_name).pw_uid, grp.getgrnam(group_name).gr_gid


def set_user_and_group(path, user, group):
    """Chown *path* to user:group after trustlist validation; False when anything is invalid."""
    allowed, real_path = check_path_in_white_list(path)
    if not allowed:
        return False
    try:
        uid, gid = get_uid_and_gid(user, group)
    except Exception:
        return False
    # lchown so a symlink itself is changed rather than its target.
    os.lchown(real_path, int(uid), int(gid))
    return True


def set_permisson(path, permisson):
    """Apply the given mode bits to *path* after trustlist validation; False when rejected."""
    allowed, real_path = check_path_in_white_list(path)
    if not allowed:
        return False
    os.chmod(real_path, permisson)
    return True


def add_resource_info(response, instance, fun_inst):
    """Append one TPOPSGaussDBInstance resource description built from *instance* to *response*."""
    try:
        tpops_version = instance["agent_info"]["current_version"].split("_")[-1]
    except Exception as e:
        # Older payloads lack agent_info; fall back to the legacy label.
        log.debug(f"get new tpops version exception: {e}")
        tpops_version = "OLTP OPS"
    instance_id = instance.get("id")
    log.info(f"tpopsVersion is {tpops_version}, instanceId: {instance_id}")
    instance_status = instance.get("instance_status", InstanceStatus.NORMAL)
    status = InstanceStatus.ACTIVE if instance_status == InstanceStatus.NORMAL else InstanceStatus.INACTIVE
    resource_info = {
        "name": instance.get("name"),
        "id": instance_id,
        "type": "Database",
        "subType": "TPOPSGaussDBInstance",
        "extendInfo": {
            "version": "version",
            'status': status,
            "region": "region",
            "tpopsVersion": tpops_version,
            "dbVersion": "tpops" if fun_inst == VERSION.TPOPS else "convergent"
        },
    }
    response.resource_list.append(resource_info)


def get_token_fail_by_wrong_password(body):
    """True when *body* carries a known wrong-password error code under any error key."""
    err_codes = ('errCode', 'error_code', 'code')
    target_codes = ('DBS.00110000', 'DBS.00110002', 'TPOPS.25-400009', 'TPOPS.25-400012')
    for key in err_codes:
        if key in body and body[key] in target_codes:
            return True
    return False
