#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import locale
import os
import platform
import re
import shutil
import signal

from common.common import touch_file, read_result_file, execute_cmd
from common.const import CMDResult
from common.file_common import delete_file, create_dir_recursive, change_owner_by_name, exec_lchown_dir_recursively, \
    delete_path
from common.util.exec_utils import exec_mkdir_cmd
from oracle import logger
from oracle.common.common import chown_file_path_owner, login_oracle_database, exec_sql_cmd, parse_html_result, \
    login_asm_instance
from oracle.common.constants import PluginPathConstant, Platform, ScriptExitCode, RETURN_INVALID, PexpectResult, \
    HostInfoConstants
from oracle.common.linux_common import get_min_of_last_log_scn_by_thread, execute_linux_sqlplus_cmd, \
    execute_linux_rman_cmd
from oracle.common.user_env_common import get_oracle_user_env_by_linux, get_asm_user_env_by_linux
from oracle.common.windows_common import write_tmp_file, set_db_silence_sql, execute_windows_sqlplus_cmd, \
    get_file_lines, get_dbf_contents, create_empty_file, find_content_in_file, create_dir, execute_windows_rman_cmd, \
    generate_additional_info


def get_last_log_scn_info(login_db_params, log_is_backed_up, job_id):
    """Return the (scn, time) pair of the minimum last-log SCN across threads."""
    scn_info = get_min_of_last_log_scn_by_thread(login_db_params, log_is_backed_up, job_id)
    return scn_info[0], scn_info[1]


def get_online_redo_log_info(pid, params, tmp_rst):
    """Spool GROUP#/MEMBER rows of v$logfile into tmp_rst via sqlplus.

    :param pid: job/process id used to name the temporary sql script
    :param params: job params; reads 'instance_name' and 'db_install_user'
    :param tmp_rst: spool target file for the query result
    :return: sqlplus exit code
    """
    on_windows = platform.system().lower() == "windows"
    base_dir = PluginPathConstant.WINDOWS_TMP_PATH if on_windows else PluginPathConstant.STMP_PATH
    tmp_sql = f"{base_dir}/get_online_log_info_{pid}.sql"
    sql_text = (
        f"SPOOL {tmp_rst}\n"
        "set linesize 999;\n"
        "col MEMBER for a255;\n"
        "select GROUP#, MEMBER from v$logfile order by GROUP#;\n"
        "SPOOL OFF;\n"
        "exit;\n"
    )
    write_tmp_file(tmp_sql, sql_text)
    set_db_silence_sql(tmp_sql)
    if on_windows:
        ret = execute_windows_sqlplus_cmd(pid, params, tmp_sql, tmp_rst, params.get("instance_name", ""))
    else:
        # sqlplus runs as the db user: pre-create the result file and hand
        # both files over to that user first.
        touch_file(tmp_rst)
        chown_file_path_owner(tmp_rst, params.get("db_install_user", ""))
        chown_file_path_owner(tmp_sql, params.get("db_install_user", ""))
        ret = execute_linux_sqlplus_cmd(pid, params, tmp_sql)
    delete_file(tmp_sql)
    return ret


def read_additional_db_file(additional_path, split_letter):
    """Parse the additional db-files listing into id and path sets.

    Each valid line looks like ``1;SYSAUX;3;/oracle/.../sysaux01.dbf``;
    field index 2 is the file id and index 3 the file path.

    :param additional_path: path of the additional db-files list file
    :param split_letter: regex pattern used to split each line into fields
    :return: (file_ids, file_paths) sets; both empty when the file is missing
    """
    file_ids = set()
    file_paths = set()
    if not os.path.exists(additional_path):
        logger.error(f'additional path is not exist')
        return file_ids, file_paths

    if platform.system().lower() == "windows":
        # Windows result files are written in the system locale encoding.
        encoding = locale.getdefaultlocale()[1]
        db_files_lines = read_result_file(additional_path, encoding=encoding).splitlines()
    else:
        db_files_lines = read_result_file(additional_path).splitlines()
    for line in db_files_lines:
        # 1;SYSAUX;3;/oracle/app/oradata/ORCL/sysaux01.dbf
        result = re.split(split_letter, line.strip())
        if len(result) < 4:
            # Skip blank or malformed lines instead of raising IndexError.
            continue
        file_ids.add(result[2])
        file_paths.add(result[3])
    return file_ids, file_paths


def get_datafile_info_cdb(pid, params, tmp_rst):
    """Spool CDB-root (CON_ID = 1) datafile info into tmp_rst via sqlplus.

    The spooled columns are CON_ID, tablespace name, file number, file path
    and file size in MB, joined from cdb_data_files and V$DATAFILE.

    :param pid: job/process id used to name the temporary sql script
    :param params: job params; reads 'instance_name' (windows) or
        'db_install_user' (linux)
    :param tmp_rst: spool target file for the query result
    :return: sqlplus exit code (ScriptExitCode.SUCCESS on success)
    """
    sql_cmd = (f"SPOOL {tmp_rst}\n"
               f"set linesize 999\n"
               f"col tsFile for a520\n")
    sql_cmd += (f"SELECT "
                f"c.CON_ID CON_ID, c.TABLESPACE_NAME tsName, f.File# fNo, f.Name tsFile, f.bytes/1024/1024 fMbSize "
                f"FROM cdb_data_files c, V$DATAFILE f WHERE c.RELATIVE_FNO = f.FILE# and c.CON_ID = 1;\n")
    sql_cmd += (f"SPOOL OFF\n"
                f"exit;\n")

    if platform.system().lower() == "windows":
        tmp_sql = f"{PluginPathConstant.WINDOWS_TMP_PATH}/get_datafile_info_cdb_{pid}.sql"
        write_tmp_file(tmp_sql, sql_cmd)
        set_db_silence_sql(tmp_sql)
        instance_name = params.get('instance_name')
        ret_code = execute_windows_sqlplus_cmd(pid, params, tmp_sql, tmp_rst, instance_name)
    else:
        tmp_sql = f"{PluginPathConstant.STMP_PATH}/get_datafile_info_cdb_{pid}.sql"
        write_tmp_file(tmp_sql, sql_cmd)
        set_db_silence_sql(tmp_sql)
        # sqlplus runs as the db user: pre-create the result file and hand
        # both files over to that user before executing.
        if not os.path.exists(tmp_rst):
            touch_file(tmp_rst)
        chown_file_path_owner(tmp_rst, params.get("db_install_user", ""))
        chown_file_path_owner(tmp_sql, params.get("db_install_user", ""))
        ret_code = execute_linux_sqlplus_cmd(pid, params, tmp_sql)
    if ret_code != ScriptExitCode.SUCCESS:
        encoding = locale.getdefaultlocale()[1]
        logger.error(f"get_datafile_info_cdb failed, pid:{pid}, ret:{ret_code}, "
                     f"error is: {read_result_file(tmp_rst, encoding=encoding)}.")
    delete_file(tmp_sql)
    return ret_code


def build_backup_database_sql(tmp_sql, datafile_min_scn, params):
    """Assemble the top-level RMAN backup script and write it to tmp_sql.

    Generates the per-datafile fragment first, then prefixes the RUN block
    with session/configuration commands appropriate to the Oracle version
    and encryption setting.

    :param tmp_sql: destination path for the generated RMAN script
    :param datafile_min_scn: lowest datafile checkpoint SCN (archive log start)
    :param params: job params (pid, datafile_info, ora_pre_version, is_enc_bk, ...)
    """
    pid = params.get('pid')
    datafile_info = params.get('datafile_info')
    ora_pre_version = int(params.get('ora_pre_version'))
    is_enc_bk = params.get('is_enc_bk')
    if platform.system().lower() == "windows":
        backup_datafile_sql = f"{PluginPathConstant.WINDOWS_TMP_PATH}/backup_datafile_sql_{pid}.sql"
    else:
        backup_datafile_sql = f"{PluginPathConstant.STMP_PATH}/backup_datafile_sql_{pid}.sql"
    logger.info("Build datafile sql.")
    build_backup_datafile_sql(backup_datafile_sql, datafile_info, params)

    pieces = []
    if ora_pre_version > 12:
        pieces.append("alter session set events 'trace[krb.*] disk disable, memory disable';\n")
    pieces.append("configure backup optimization off;\n")
    pieces.append("configure controlfile autobackup off;\n")
    pieces.append("set nocfau;\n")
    pieces.append("configure maxsetsize to unlimited;\n")
    if is_enc_bk == 0 and ora_pre_version > 11:
        pieces.append("configure encryption for database off;\n")
    pieces.append("RUN {\n")
    pieces.append(build_backup_database_sql_run_scope(params, datafile_min_scn, pid))
    pieces.append("}\n")

    write_tmp_file(tmp_sql, "".join(pieces))


def build_backup_database_sql_run_scope(params, datafile_min_scn, pid):
    """Build the body of the RMAN RUN {...} block for a database backup.

    The block allocates channels, splices in the pre-built per-datafile
    commands, re-catalogs existing copies for incremental levels, then backs
    up archive logs (from datafile_min_scn) and the current controlfile,
    tagging everything with the db and first pdb name.

    :param params: job params (level, db_name, main_backup_path, backup_tmp,
        open_mode, pdb_names, channel/qos settings, ...)
    :param datafile_min_scn: lowest datafile checkpoint SCN; archive logs are
        copied from this SCN onwards
    :param pid: job/process id used to locate the pre-built datafile sql file
    :return: RMAN command text to place inside RUN { ... }
    """
    level = int(params.get('level'))
    db_name = params.get('db_name')
    main_backup_path = params.get('main_backup_path')
    backup_tmp = params.get('backup_tmp')
    open_mode = params.get('open_mode')
    pdb_names = params.get('pdb_names')
    # PDB$SEED is a read-only template and is excluded from the tag names.
    pdb_names = [pdb_name for pdb_name in pdb_names if not pdb_name == "PDB$SEED"]
    pdb_names.sort()
    # NOTE(review): rfind('\\') returns -1 on '/'-separated linux paths, so
    # this slice then drops the last character of main_backup_path — confirm
    # that is intended for the linux log_path_format below.
    log_parent_path = main_backup_path[0:main_backup_path.rfind('\\')]
    if platform.system().lower() == "windows":
        backup_datafile_sql = f"{PluginPathConstant.WINDOWS_TMP_PATH}/backup_datafile_sql_{pid}.sql"
        backup_tmp_format = f"{backup_tmp}\\%T_%U"
        log_path_format = f"{log_parent_path}\\%\\log\\arch_%"
        log_backup_format = f"{main_backup_path}\\log\\arch_%t_%s.log"
        controlfile_format = f"{main_backup_path}\\controlfile.ctl"
    else:
        backup_datafile_sql = f"{PluginPathConstant.STMP_PATH}/backup_datafile_sql_{pid}.sql"
        backup_tmp_format = f"{backup_tmp}/%T_%U"
        log_path_format = f"{log_parent_path}/%/log/arch_%"
        log_backup_format = f"{main_backup_path}/log/arch_%t_%s.log"
        controlfile_format = f"{main_backup_path}/controlfile.ctl"
    content = "    SET COMMAND ID TO 'ProtectAgent_Backup';\n"
    if level == 0:
        # Full backup: drop stale catalog entries for earlier archive log copies.
        content += f"    change archivelog like '%/epoch-scn_%/arch_%' uncatalog;\n"
    content += build_backup_database_sql_channel(params)
    if os.path.exists(backup_datafile_sql):
        # Splice in the per-datafile backup commands written by
        # build_backup_datafile_sql; the fragment file is consumed here.
        encoding = locale.getdefaultlocale()[1]
        build_datafile_content = read_result_file(backup_datafile_sql, encoding=encoding)
        delete_file(backup_datafile_sql)
        content += build_datafile_content
    if level == 1 or level == 2:
        # Incremental: re-catalog the existing level-0 datafile copies so the
        # incremental merge can find them.
        dbf_contents = get_dbf_contents(main_backup_path, pid)
        for dbf in dbf_contents:
            content += f"    catalog datafilecopy '{dbf}' tag 'EBACKUP-{db_name}-{pdb_names[0]}-DATA' level 0;\n"
    content = build_content_depend_level(level, content, db_name, pdb_names, backup_tmp_format)
    # NOTE(review): upper() also uppercases the path inside the LIKE pattern;
    # verify this still matches on case-sensitive (linux) paths.
    content += f"    DELETE FORCE NOPROMPT ARCHIVELOG like '{log_path_format}';\n".upper()
    if open_mode != 'READONLY' and open_mode != "READONLYWITHAPPLY":
        content += f"    sql 'alter system archive log current';\n"
    content += f"    backup as copy archivelog from scn {datafile_min_scn} format '{log_backup_format}' reuse;\n"
    content += (f"    backup as copy current controlfile format '{controlfile_format}' "
                f"tag 'EBACKUP-{db_name}-{pdb_names[0]}-CTL' reuse;\n")
    return content


def build_content_depend_level(level, content, db_name, pdb_names, backup_tmp_format):
    """Append the incremental-merge RMAN commands for level 1/2 backups.

    The two incremental branches were duplicates differing only in the
    ``cumulative`` keyword, so they are merged here. Level 0 (or any other
    level) leaves content untouched.

    :param level: backup level (0 full, 1 cumulative incr, 2 differential incr)
    :param content: RMAN script accumulated so far
    :param db_name: database name used in the copy tag
    :param pdb_names: pdb names; the first one is used in the tag
    :param backup_tmp_format: RMAN format string for the temporary backup pieces
    :return: content with backup/recover/delete commands appended as needed
    """
    if level in (1, 2):
        # Level 1 backs up cumulatively since the level-0 copy; level 2 only
        # the changes since the previous incremental.
        cumulative = "cumulative " if level == 1 else ""
        tag = f"EBACKUP-{db_name}-{pdb_names[0]}-DATA"
        content += (f"    backup incremental level 1 {cumulative}for recover of copy with "
                    f"tag '{tag}' database format '{backup_tmp_format}';\n")
        content += f"    recover copy of database with tag '{tag}';\n"
        content += f"    delete noprompt backup tag '{tag}';\n"
    return content


def build_backup_database_sql_channel(params):
    """Build the RMAN channel-allocation section, honoring qos rate limits.

    The per-channel rate is the total qos (MB/s, converted to KB) divided by
    the number of channels, or by the datafile count when there are fewer
    datafiles than channels.

    :param params: job params (datafile_info, qos, channels, db_is_cluster,
        node_number, ip_port_info)
    :return: RMAN 'allocate channel' + parallelism configuration text
    """
    datafile_info = params.get('datafile_info')
    qos = params.get('qos')
    channels = params.get('channels')
    db_is_cluster = params.get('db_is_cluster')
    node_number = params.get('node_number')
    ip_port_info = params.get('ip_port_info')

    dbf_num = get_file_lines(datafile_info)
    real_qos = 0
    if qos > 0:
        # Spread the total rate over channels, or over datafiles when fewer.
        divisor = channels if (dbf_num >= channels or dbf_num == 0) else dbf_num
        real_qos = qos * 1024 // divisor

    start_index = 1
    if db_is_cluster == 1 and node_number != 0:
        content = build_backup_database_sql_channel_for_cluster(start_index, channels, ip_port_info, qos, real_qos)
    else:
        content = build_backup_database_sql_channel_for_none_cluster(start_index, channels, qos, real_qos)
    content += f"    configure device type disk parallelism {channels};\n"
    return content


def build_backup_database_sql_channel_for_cluster(index, channels, ip_port_info, qos, real_qos):
    """Allocate RMAN disk channels round-robin over the cluster nodes.

    Each node entry is "sid,host,port"; channels connect through the node's
    listener. With qos enabled a per-channel rate limit is added.

    :param index: first channel number to allocate (normally 1)
    :param channels: total number of channels to allocate
    :param ip_port_info: list of "sid,host,port" strings, one per node
    :param qos: total rate limit (0 means unlimited)
    :param real_qos: per-channel rate in KB/s (used when qos > 0)
    :return: RMAN 'allocate channel' command text
    """
    content = ""
    # Guard: with no node info the while loop below would never advance
    # `index` and would spin forever.
    if not ip_port_info:
        return content
    while index <= channels:
        for instance_node_info in ip_port_info:
            [sid_info, host_info, port_info] = instance_node_info.split(",")
            if qos == 0:
                content += f"    allocate channel eBackup{'{:0>2d}'.format(index)} type disk connect /@(" \
                           f"DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(Host={host_info})(Port={port_info}))(" \
                           f"CONNECT_DATA=(SID={sid_info})));\n"
            else:
                content += f"    allocate channel eBackup{'{:0>2d}'.format(index)} type disk rate {real_qos}k " \
                           f"connect /@(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(Host={host_info})(Port={port_info}))(" \
                           f"CONNECT_DATA=(SID={sid_info})));\n"

            index += 1
            if index > channels:
                break
    return content


def build_backup_database_sql_channel_for_none_cluster(index, channels, qos, real_qos):
    """Allocate plain local disk channels, optionally rate-limited by qos.

    :param index: first channel number to allocate (normally 1)
    :param channels: last channel number to allocate
    :param qos: total rate limit (0 means unlimited)
    :param real_qos: per-channel rate in KB/s (used when qos > 0)
    :return: RMAN 'allocate channel' command text
    """
    rate_clause = "" if qos == 0 else f" rate {real_qos}k"
    lines = [
        f"    allocate channel eBackup{channel_no:0>2d} type disk{rate_clause};\n"
        for channel_no in range(index, channels + 1)
    ]
    return "".join(lines)


def build_backup_datafile_sql(full_backup_sql, datafile_info, params):
    """Write per-datafile 'backup as copy' RMAN commands into full_backup_sql.

    Parses the spooled datafile listing, distributes datafiles round-robin
    over the configured backup paths, creates one sub-directory per file
    number, and emits level-0 copy commands in chunks. For incremental jobs
    files already recorded in {additional}/dbfiles are skipped. When no
    datafile needs backing up, full_backup_sql is removed instead.

    :param full_backup_sql: target path for the generated RMAN fragment
    :param datafile_info: spool file produced by get_datafile_info_cdb/pdb
    :param params: job params (backup_path_list, db_name, additional, pid,
        pdb_names, level, owner/instance info for directory creation)
    """
    create_empty_file(full_backup_sql)
    backup_path_list = params.get("backup_path_list")
    db_name = params.get("db_name")
    additional = params.get("additional")
    pid = params.get('pid')
    pdb_names = params.get('pdb_names')
    # PDB$SEED is a read-only template and is excluded from the tag names.
    pdb_names = [pdb_name for pdb_name in pdb_names if not pdb_name == "PDB$SEED"]
    pdb_names.sort()
    if not full_backup_sql or not os.path.exists(datafile_info):
        logger.error(f"Param is none cannot build backup sql, pid:{pid}.")
        return
    path_num = len(backup_path_list.split(";"))
    index_datafile = 0
    content = f"    backup as copy incremental level 0 tag 'EBACKUP-{db_name}-{pdb_names[0]}-DATA' \n"
    init_size = os.path.getsize(full_backup_sql)
    cur_strlen = init_size
    file_contents = read_datafile_info(datafile_info)
    file_nos = []
    for line_content in file_contents:
        # Skip sqlplus footer/noise lines from the spool output.
        if "rows selected" in line_content or "row selected" in line_content or "Session altered" in line_content:
            continue
        db_info_content = line_content.split()
        if len(db_info_content) == 0:
            continue
        # Columns: CON_ID, tsName, fNo, tsFile (see the datafile-info query).
        f_no, fs_file = db_info_content[2], db_info_content[3]
        if f_no in file_nos:
            continue
        result = False
        if params.get("level") != 0:
            # Incremental: skip datafiles already present in the previous
            # copy's dbfiles record.
            result = find_content_in_file(fs_file, f"{additional}/dbfiles")
        if not result:
            # Round-robin the datafiles across the configured backup paths.
            index_path = index_datafile % path_num
            index_datafile += 1
            path = backup_path_list.split(";")[index_path]
            create_dir_for_linux_and_windows(pid, path, f_no, params)
            if platform.system().lower() == "windows":
                tmp_str = f"    (datafile {f_no} format '{path}\\{f_no}\\FNO-%f_TS-%N.dbf')\n"
            else:
                tmp_str = f"    (datafile {f_no} format '{path}/{f_no}/FNO-%f_TS-%N.dbf')\n"
            cur_strlen += len(tmp_str)
            # Start a new backup command once the accumulated clause exceeds
            # 4096 characters — presumably an RMAN statement-length limit;
            # confirm against the RMAN reference.
            if cur_strlen > 4096:
                content += f";\n    backup as copy incremental level 0 tag 'EBACKUP-{db_name}-{pdb_names[0]}-DATA' \n"
                cur_strlen = init_size
            content += f"{tmp_str}\n"
            file_nos.append(f_no)
    if cur_strlen == init_size:
        # Nothing to back up: remove the empty fragment so callers skip it.
        delete_file(full_backup_sql)
    else:
        content += f";\n"
        write_tmp_file(full_backup_sql, content)


def create_dir_for_linux_and_windows(pid, path, f_no, params):
    """Create the per-datafile backup directory on the current platform.

    On linux the directory is chowned to the oracle user/group; on windows
    creation is delegated to the platform helper.
    """
    if platform.system().lower() != "windows":
        target_dir = os.path.join(path, f_no)
        create_dir_recursive(target_dir)
        change_owner_by_name(target_dir, params.get("oracle_user"), params.get("oracle_group"))
    else:
        create_dir(pid, f"{path}/{f_no}", params.get("asm_instance"), params.get("oracle_home"))


def read_datafile_info(datafile_info):
    """Read the datafile-info spool file and return its lines (no newlines)."""
    with open(datafile_info, "r") as datafile:
        file_contents = datafile.read().splitlines()
    logger.info(f"Datafile info file_content: {file_contents}")
    return file_contents


def change_archive_log_unavailable(pid, params, enc_type, archive, backup_type):
    """Mark backed-up archive-log copies UNAVAILABLE in the RMAN catalog.

    Builds a 'change archivelog like ... unavailable' script and runs it via
    rman. For a "log" backup the pattern points under *archive*; otherwise it
    targets the plugin's oracle data-file path.

    :param pid: job/process id used to name the temp files
    :param params: job params; reads 'db_install_user' on linux
    :param enc_type: rman encryption type passed through to the executors
    :param archive: archive-log backup root (used when backup_type == "log")
    :param backup_type: "log" for log backups; anything else for data backups
    :return: rman exit code
    """
    sql_cmd = ""
    if platform.system().lower() == "windows":
        tmp_sql = f"{PluginPathConstant.WINDOWS_TMP_PATH}/change_archive_log_unavailable_sql_{pid}.sql"
        tmp_rst = f"{PluginPathConstant.WINDOWS_TMP_PATH}/change_archive_log_unavailable_rst_{pid}.txt"
        if backup_type == "log":
            sql_cmd += f"change archivelog like '{archive}\\resetlogs_id%\\arch_%.log' unavailable;\n"
        else:
            sql_cmd += (f"change archivelog like "
                        f"'{PluginPathConstant.WINDOWS_ORACLE_DATA_FILE_PATH}%\\log\\arch_%.log' unavailable;")
    else:
        tmp_sql = f"{PluginPathConstant.STMP_PATH}/change_archive_log_unavailable_sql_{pid}.sql"
        tmp_rst = f"{PluginPathConstant.STMP_PATH}/change_archive_log_unavailable_rst_{pid}.txt"
        if backup_type == "log":
            sql_cmd += f"change archivelog like '{archive}/resetlogs_id%/arch_%.log' unavailable;\n"
        else:
            sql_cmd += f"change archivelog like '{PluginPathConstant.ORACLE_DATA_FILE_PATH}%/log/arch_%.log' " \
                       f"unavailable;"
    # NOTE(review): upper() also uppercases the path inside the LIKE pattern;
    # verify the match still works on case-sensitive (linux) paths.
    sql_cmd = sql_cmd.upper()
    write_tmp_file(tmp_sql, sql_cmd)
    logger.info(f"cross_check_archive_log_linux, pid:{pid}.")

    if platform.system().lower() == Platform.WINDOWS:
        ret_code = execute_windows_rman_cmd(pid, params, enc_type, tmp_sql, tmp_rst)
    else:
        # rman runs as the db user: pre-create the result file and hand both
        # files over to that user before executing.
        touch_file(tmp_rst)
        chown_file_path_owner(tmp_rst, params.get("db_install_user", ""))
        chown_file_path_owner(tmp_sql, params.get("db_install_user", ""))
        ret_code = execute_linux_rman_cmd(pid, params, enc_type, tmp_sql, tmp_rst)
    delete_file(tmp_sql)
    delete_file(tmp_rst)
    return ret_code


def get_datafile_info_pdb(pid, params, tmp_rst, pdb_name):
    """Append one pdb's datafile info to the tmp_rst spool file via sqlplus.

    The query joins V$TABLESPACE and V$DATAFILE on the pdb's CON_ID and
    spools CON_ID, tablespace name, file number, file path and size (MB),
    largest files first. ``SPOOL ... APPEND`` lets successive pdbs share one
    result file (see get_pdb_data_files).

    :param pid: job/process id used to name the temporary sql script
    :param params: job params; reads 'instance_name' (windows) or
        'db_install_user' (linux)
    :param tmp_rst: spool target file, appended to
    :param pdb_name: container name to filter on
    :return: sqlplus exit code (ScriptExitCode.SUCCESS on success)
    """
    sql_cmd = f"SPOOL {tmp_rst} APPEND\n" \
              f"set linesize 999\n" \
              f"col tsFile for a520\n"
    sql_cmd += f"SELECT t.CON_ID CON_ID, t.Name tsName, f.File# fNo, f.Name tsFile, f.bytes/1024/1024 fMbSize " \
               f"FROM V$TABLESPACE t, V$DATAFILE f WHERE t.TS# = f.TS# and t.CON_ID = f.CON_ID and f.CON_ID = " \
               f"(SELECT CON_ID FROM V$CONTAINERS WHERE NAME = '{pdb_name}') order by bytes desc;\n"
    sql_cmd += f"SPOOL OFF\n" \
               f"exit;\n"

    if platform.system().lower() == "windows":
        tmp_sql = f"{PluginPathConstant.WINDOWS_TMP_PATH}/get_datafile_info_pdb_{pid}.sql"
        write_tmp_file(tmp_sql, sql_cmd)
        set_db_silence_sql(tmp_sql)
        instance_name = params.get('instance_name')
        ret_code = execute_windows_sqlplus_cmd(pid, params, tmp_sql, tmp_rst, instance_name)
    else:
        tmp_sql = f"{PluginPathConstant.STMP_PATH}/get_datafile_info_pdb_{pid}.sql"
        write_tmp_file(tmp_sql, sql_cmd)
        set_db_silence_sql(tmp_sql)
        # sqlplus runs as the db user: pre-create the result file and hand
        # both files over to that user before executing.
        if not os.path.exists(tmp_rst):
            touch_file(tmp_rst)
        chown_file_path_owner(tmp_rst, params.get("db_install_user", ""))
        chown_file_path_owner(tmp_sql, params.get("db_install_user", ""))
        ret_code = execute_linux_sqlplus_cmd(pid, params, tmp_sql)
    if ret_code != ScriptExitCode.SUCCESS:
        encoding = locale.getdefaultlocale()[1]
        logger.error(f"get_datafile_info_pdb failed, pid:{pid}, ret:{ret_code}, "
                     f"error is: {read_result_file(tmp_rst, encoding=encoding)}.")
    delete_file(tmp_sql)
    return ret_code


def exist_addition_file(job_id, parent_path, addition_datafile_set):
    """Return True only when every additional datafile directory under
    parent_path exists and contains at least one entry."""
    for file_id in addition_datafile_set:
        file_dir = os.path.join(parent_path, file_id)
        if not os.path.exists(file_dir):
            logger.error(f'job id: {job_id}, dir: {file_dir} is not exist')
            return False
        entries = os.listdir(file_dir)
        logger.info(f'sub_file : {len(entries)}')
        if not entries:
            logger.error(f'job id: {job_id}, file: {file_dir} is empty')
            return False
    return True


def cross_check_backup_dbf(pid, params, enc_type, db_name, pdb_names):
    """Run RMAN crosscheck on the tagged datafile copies of db_name.

    :param pid: job/process id used to name the temp files
    :param params: job params; reads 'db_install_user' on linux
    :param enc_type: rman encryption type passed through to the executors
    :param db_name: database name used in the copy tag
    :param pdb_names: pdb names; the first (sorted, PDB$SEED excluded) is
        part of the tag
    :return: rman exit code, or RETURN_INVALID when db_name is empty
    """
    if not db_name:
        logger.error(f"Db name is none can not crosscheck datafile, pid:{pid}.")
        return RETURN_INVALID
    logger.info(f"Begin to crosscheck copy datafile, pid:{pid}, db_name: {db_name}.")
    if platform.system().lower() == "windows":
        tmp_sql = f"{PluginPathConstant.WINDOWS_TMP_PATH}/rman_cross_check_data_{pid}.sql"
        tmp_rst = f"{PluginPathConstant.WINDOWS_TMP_PATH}/rman_cross_check_data_{pid}.txt"
    else:
        tmp_sql = f"{PluginPathConstant.STMP_PATH}/rman_cross_check_data_{pid}.sql"
        tmp_rst = f"{PluginPathConstant.STMP_PATH}/rman_cross_check_data_{pid}.txt"
    # PDB$SEED is a read-only template and is excluded from the tag names.
    pdb_names = [pdb_name for pdb_name in pdb_names if not pdb_name == "PDB$SEED"]
    pdb_names.sort()
    sql_cmd = f"crosscheck datafilecopy tag 'EBACKUP-{db_name}-{pdb_names[0]}-DATA';\n" \
              f"exit;\n"
    write_tmp_file(tmp_sql, sql_cmd)
    if platform.system().lower() == "windows":
        ret_code = execute_windows_rman_cmd(pid, params, enc_type, tmp_sql, tmp_rst)
    else:
        # rman runs as the db user: pre-create the result file and hand both
        # files over to that user before executing.
        touch_file(tmp_rst)
        chown_file_path_owner(tmp_rst, params.get("db_install_user", ""))
        chown_file_path_owner(tmp_sql, params.get("db_install_user", ""))
        ret_code = execute_linux_rman_cmd(pid, params, enc_type, tmp_sql, tmp_rst)
    if ret_code != ScriptExitCode.SUCCESS:
        encoding = locale.getdefaultlocale()[1]
        logger.warning(f"Execute crosscheck copy datafile failed, pid:{pid}, ret_code:{ret_code}, "
                       f"error is:{read_result_file(tmp_rst, encoding=encoding)}.")
    delete_file(tmp_sql)
    delete_file(tmp_rst)
    return ret_code


def cross_check_archive_log(pid, params, enc_type):
    """Run RMAN 'crosscheck archivelog all' and return its exit code.

    :param pid: job/process id used to name the temp files
    :param params: job params; reads 'db_install_user' on linux
    :param enc_type: rman encryption type passed through to the executors
    :return: rman exit code
    """
    logger.info(f"cross_check_archive_log, pid:{pid}.")
    on_windows = platform.system().lower() == "windows"
    base_dir = PluginPathConstant.WINDOWS_TMP_PATH if on_windows else PluginPathConstant.STMP_PATH
    tmp_sql = f"{base_dir}/rman_cross_check_log_{pid}.sql"
    tmp_rst = f"{base_dir}/rman_cross_check_log_{pid}.txt"
    write_tmp_file(tmp_sql, "crosscheck archivelog all;\nexit;\n")
    if on_windows:
        ret_code = execute_windows_rman_cmd(pid, params, enc_type, tmp_sql, tmp_rst)
    else:
        # rman runs as the db user: pre-create the result file and hand both
        # files over to that user before executing.
        touch_file(tmp_rst)
        install_user = params.get("db_install_user", "")
        chown_file_path_owner(tmp_rst, install_user)
        chown_file_path_owner(tmp_sql, install_user)
        ret_code = execute_linux_rman_cmd(pid, params, enc_type, tmp_sql, tmp_rst)
    if ret_code != ScriptExitCode.SUCCESS:
        encoding = locale.getdefaultlocale()[1]
        logger.warning(f"Execute crosscheck archive_log failed, pid:{pid}, ret_code:{ret_code}, "
                       f"error is:{read_result_file(tmp_rst, encoding=encoding)}.")
    delete_file(tmp_sql)
    delete_file(tmp_rst)
    return ret_code


def get_pdb_data_files(pid, login_db_params, pdb_names, datafile_rst):
    """Gather datafile information for the CDB root and then each pdb.

    On the first failure the partial result file is removed and the failing
    code is returned; otherwise ScriptExitCode.SUCCESS.
    """
    steps = [("get_datafile_info_cdb",
              lambda: get_datafile_info_cdb(pid, login_db_params, datafile_rst))]
    steps.extend(
        ("get_datafile_info_pdb",
         lambda name=pdb: get_datafile_info_pdb(pid, login_db_params, datafile_rst, name))
        for pdb in pdb_names
    )
    for step_name, step in steps:
        ret_code = step()
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"{step_name} failed, pid:{pid}.")
            delete_file(datafile_rst)
            return ret_code
    return ScriptExitCode.SUCCESS


def get_all_table_space_from_pdb(pid, db_instance_name, install_user_name, pdb_name):
    """Return the set of tablespace names that belong to one pdb.

    Logs into the database via sqlplus (pexpect child), queries V$TABLESPACE
    filtered by the pdb's CON_ID, and parses the html-formatted output.
    Returns an empty set on login failure, query failure or any exception.
    """
    logger.info(f"get_all_table_space_from_pdb, database instance name: {db_instance_name}, "
                f"install user name: {install_user_name}, pid: {pid}.")
    cmd = f"SELECT NAME FROM V$TABLESPACE WHERE CON_ID = (SELECT CON_ID FROM V$CONTAINERS WHERE NAME = '{pdb_name}');"
    result, child = login_oracle_database(pid, db_instance_name, install_user_name, timeout=30)
    if not result:
        return set()
    try:
        return_code, std_out = exec_sql_cmd(child, cmd, PexpectResult.FILE_PATH)
        if return_code:
            # PexpectResult.FILE_PATH[3] in the output appears to mark a
            # failed query — TODO confirm against PexpectResult's definition.
            if PexpectResult.FILE_PATH[3] in str(std_out):
                logger.warn(f"Failed get_all_table_space_from_pdb: {cmd}, pid: {pid}.")
                return set()
            table_space_names = parse_html_result(std_out)
            table_space_set = set()
            for table_space_name in table_space_names:
                table_space_set.add(table_space_name.get('NAME'))
            return table_space_set
        logger.error(f"Failed to execute cmd: {cmd}, pid: {pid}.")
        return set()
    except Exception as exception:
        logger.info(f"Failed to execute cmd: {cmd}, pid: {pid}, exception is: {exception}.")
        return set()
    finally:
        # Always tear down the sqlplus child; the windows child needs an
        # explicit kill instead of close().
        if platform.system().lower() == "windows":
            child.kill(signal.SIGTERM)
        else:
            child.close()


def backup_orapw_file(pid, additional, params):
    """Dispatch password-file backup to the platform-specific generator."""
    logger.info(f"backup_orapw_file, pid:{pid}.")
    generate = (generate_additional_info
                if platform.system().lower() == "windows"
                else generate_additional_info_linux)
    generate(pid, additional, params)


def generate_additional_info_linux(pid, additional, params):
    """Copy the database password file into {additional}/dbs/orapw{db_name}.

    Source resolution: ask srvctl for the configured password file; when
    srvctl reports nothing, fall back to $ORACLE_HOME/dbs/orapw{instance};
    a '+'-prefixed path lives in ASM and is staged on local disk with
    'asmcmd pwcopy' before being moved into the additional directory.

    :param pid: job/process id (logging only)
    :param additional: backup additional-info root containing a dbs/ dir
    :param params: job params (oracle_home, db_name, db_instance,
        asm_install_user, oracle_group)
    """
    oracle_home = params.get("oracle_home")
    db_name = params.get("db_name")
    db_instance = params.get("db_instance")
    asm_install_user = params.get("asm_install_user")
    if not asm_install_user:
        # Default grid-infrastructure owner when not configured.
        asm_install_user = 'grid'
    db_pwd_file = get_orapw_path_by_srvctl(params)
    logger.info(f"get_orapw_path_by_srvctl db_pwd_file:{db_pwd_file}.")
    if not db_pwd_file:
        shutil.copy2(f"{oracle_home}/dbs/orapw{db_instance}", f"{additional}/dbs/orapw{db_name}")
    elif db_pwd_file.startswith('+'):
        # Password file stored in an ASM diskgroup: stage it on local disk
        # under a directory the grid user can write, then move it over.
        orapw_path = os.path.join(PluginPathConstant.STMP_PATH, 'orapw')
        exec_mkdir_cmd(orapw_path)
        oracle_group = params.get("oracle_group")
        exec_lchown_dir_recursively(orapw_path, asm_install_user, oracle_group)
        orapw_file = os.path.join(orapw_path, f"orapw{db_name}")
        if os.path.exists(orapw_file):
            delete_file(orapw_file)
        asm_user_env = get_asm_user_env_by_linux(asm_install_user)
        # NOTE(review): this command string is built from params and executed
        # through a shell; values are presumably trusted plugin config —
        # confirm they cannot carry shell metacharacters.
        copy_orapw_cmd = f"su - {asm_install_user} -c '{asm_user_env}asmcmd pwcopy {db_pwd_file} {orapw_file}'"
        return_code, std_out, std_err = execute_cmd(copy_orapw_cmd)
        if return_code != CMDResult.SUCCESS:
            logger.error(f"asmcmd pwcopy failed, exec {copy_orapw_cmd}, std_out {std_out} std_err {std_err}")
        if not os.path.exists(f"{additional}/dbs/orapw{db_name}"):
            shutil.move(orapw_file, f"{additional}/dbs")
        delete_path(orapw_path)
    else:
        shutil.copy2(f"{db_pwd_file}", f"{additional}/dbs/orapw{db_name}")
    logger.info(f"Success generate additional info, pid:{pid}.")


def get_orapw_path_by_srvctl(params):
    """Query 'srvctl config database' for the configured password file path.

    The echoed ORACLE_MARK sentinel separates login-shell noise from the
    actual srvctl output. Returns the "Password file" value, or '' when the
    command fails or the field is absent.
    """
    db_install_user = params.get("db_install_user")
    db_name = params.get("db_name")
    db_instance = params.get("db_instance")
    oracle_mark = "/###ORACLE_MARK###/"
    oracle_user_env = get_oracle_user_env_by_linux(db_instance)
    cmd = f"su - {db_install_user} -c 'echo \"{oracle_mark}\";{oracle_user_env}srvctl config database -d {db_name}'"
    logger.info(f"generate_additional_info_linux, cmd {cmd}")
    return_code, std_out, std_err = execute_cmd(cmd)
    if return_code != CMDResult.SUCCESS:
        logger.error(f"generate_additional_info_linux failed, std_out {std_out} std_err {std_err}")
        return ""
    srvctl_output = str(std_out).split(oracle_mark)[1].strip()
    for line in srvctl_output.split('\n'):
        logger.info(f"generate_additional_info_linux, line {line}")
        if "Password file" in line:
            return line.split(":", 1)[1].strip()
    return ''


def get_all_pdbs(pid, db_instance_name, install_user_name):
    """Get all pdbs of the database instance.

    :param pid: pid
    :param db_instance_name: database instance name
    :param install_user_name: database install user name
    :return: list of dicts parsed from the html output of
        ``select name,open_mode from v$pdbs`` (presumably keyed NAME /
        OPEN_MODE — confirm against parse_html_result), or [] on failure
    """
    logger.info(f"get_all_pdbs.")
    cmd = "select name,open_mode from v$pdbs;"
    result, child = login_oracle_database(pid, db_instance_name, install_user_name, timeout=30)
    if not result:
        return []
    try:
        return_code, std_out = exec_sql_cmd(child, cmd, PexpectResult.FILE_PATH)
        if return_code:
            # PexpectResult.FILE_PATH[3] in the output appears to mark a
            # failed query — TODO confirm against PexpectResult's definition.
            if PexpectResult.FILE_PATH[3] in str(std_out):
                logger.warn(f"Failed to get_all_pdbs: {cmd}, pid: {pid}.")
                return []
            return parse_html_result(std_out)
        logger.error(f"Failed to execute cmd: {cmd}, pid: {pid}.")
        return []
    finally:
        # Always tear down the sqlplus child; the windows child needs an
        # explicit kill instead of close().
        if platform.system().lower() == "windows":
            child.kill(signal.SIGTERM)
        else:
            child.close()


def get_asm_spfile_path(pid, asm_instance_name, asm_install_user_name):
    """Return the spfile path of the ASM instance ('' when unavailable).

    Logs into the ASM instance and runs 'show parameter spfile', parsing the
    VALUE column from the html-formatted output.

    :raises Exception: when login fails or executing the sql command raises
    """
    logger.info(f"get_asm_spfile_path, pid: {pid}, asm_instance_name : {asm_instance_name}, "
                f"asm_install_user_name : {asm_install_user_name}")
    if not asm_install_user_name:
        # Default grid-infrastructure owner when not configured.
        asm_install_user_name = 'grid'
    result, child = login_asm_instance(pid, asm_instance_name, asm_install_user_name, timeout=30)
    if not result:
        raise Exception(f'Login database failed.')
    cmd = 'show parameter spfile;'
    try:
        exec_status, std_out = exec_sql_cmd(child, cmd)
    except Exception as exception:
        logger.error(f'exec cmd failed. {str(exception)}')
        raise Exception(f'exec sql cmd failed.') from exception
    finally:
        # Always tear down the sqlplus child; the windows child needs an
        # explicit kill instead of close().
        if platform.system().lower() == "windows":
            child.kill(signal.SIGTERM)
        else:
            child.close()
    spfile = ''
    if exec_status:
        results = parse_html_result(std_out)
        if not results:
            logger.error(f"get_asm_spfile_path results {results}")
            return spfile
        # Strip line breaks the html parser may leave inside the VALUE cell.
        spfile = results[0].get('VALUE').replace('\r', '').replace('\n', '')
        logger.info(f"get_asm_spfile_path :{spfile}")
        return spfile
    logger.error(f"Failed to get_asm_spfile_path cmd: {cmd}, std_out: {std_out}.")
    return spfile


def get_ip_port_from_address(address):
    """Extract (ip, port) from a TNS-style address string.

    Each value is the text between its prefix (HostInfoConstants.IP_PREFIX /
    PORT_PREFIX) and the following ')'.
    """
    logger.info(f"get_ip_port_from_address: {address}")

    def _between_prefix_and_paren(prefix):
        # Same find semantics as the original: no sanity check on a missing
        # prefix (find returning -1).
        start = address.find(prefix)
        end = address.find(')', start)
        return address[start + len(prefix):end]

    ip_info = _between_prefix_and_paren(HostInfoConstants.IP_PREFIX)
    port = _between_prefix_and_paren(HostInfoConstants.PORT_PREFIX)
    logger.info(f"get_ip_port_from_address,ip_info {ip_info},port {port}")
    return ip_info, port
