#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import json
import os
import sys

from common.util.cmd_utils import cmd_format
from gaussdbt.commons.const import RoachConstant, Env, ErrorCode
from gaussdbt.commons.gaussdbt_common import check_backpy_key, check_uid_consistency
from gaussdbt.commons.gaussdbt_param_protection import ParamProtection
from gaussdbt.commons.roach_meta_info import RoachMeta, get_full_backup_key, get_last_diff_backup_key
from gaussdbt.resource.gaussdbt_resource import GaussCluster
from common.common import execute_cmd, output_result_file, exter_attack
from common.common_models import ActionResult
from common.const import BackupTypeEnum, ExecuteResultEnum, RoleType, JobData, SysData, DeployType
from common.logger import Logger
from common.parse_parafile import get_user_name
from common.util.check_utils import is_valid_id

# Module-level logger shared by all checks in this plugin.
log = Logger().get_logger("gaussdbt_plugin.log")
# Job id of the current invocation; populated from argv in the __main__ block.
JOB_ID = None


@exter_attack
def parse_sql_result(sql_result, key_word):
    """Extract the value line that follows *key_word* in zsql output.

    zsql prints a result as: header line containing the column name, a
    separator line, then the value line — so the value sits two lines
    below the line that contains the key word.

    :param sql_result: raw stdout of a zsql query
    :param key_word: column header to locate (e.g. "COUNT(*)")
    :return: the value line, or "" when no line exists two below the match
        (when the key word is absent, the line at index 2 is returned —
        preserved from the original behavior)
    """
    lines = sql_result.split("\n")
    key_index = 0
    # enumerate gives the position directly; the previous list.index(item)
    # re-scanned the list for every candidate line (O(n^2) worst case).
    for index, line in enumerate(lines):
        if key_word in line:
            key_index = index
            break
    value_index = key_index + 2
    if len(lines) > value_index:
        return lines[value_index]
    return ""


def check_failover(job_pid):
    """Check whether a database failover has occurred.

    Compares the RESETLOGS id of the most recent backup set with the
    database's current RESETLOG id; a mismatch is treated as a failover
    having happened since the last backup.

    :param job_pid: pid used to resolve the database OS user name
    :return: True when the ids match (no failover); False on any command
        failure or when a failover is detected
    """
    log.info(f"Job id: {JOB_ID} check database failover")
    user_name = get_user_name(f"{Env.USER_NAME}_{job_pid}")
    # NOTE: the backslash continues the string literal, so the leading
    # whitespace of the next line is part of the shell command (harmless
    # between arguments) — do not reformat.
    cmd = f"su - {user_name} -c \
                        'zsql / as sysdba -q -c \"select RESETLOGS from SYS_BACKUP_SETS order by SCN desc limit 1;\"'"
    return_code, std_out, std_err = execute_cmd(cmd)
    # The exit status is compared as a string; "0" means success.
    if return_code != "0":
        log.error(f"Fail to exec cmd check failover sql!")
        return False
    backup_set_result = parse_sql_result(std_out, "RESETLOGS")
    cmd = f"""su - {user_name} -c 'zsql / as sysdba -q -c "select RESETLOG from dv_database;"'"""
    return_code, std_out, std_err = execute_cmd(cmd)
    if return_code != "0":
        log.error(f"Fail to exec cmd check failover sql!")
        return False
    database_result = parse_sql_result(std_out, "RESETLOG")
    # Only the leading token is compared: presumably the backup-set value
    # is space-delimited and dv_database's is '-'-delimited — TODO confirm
    # against actual zsql output.
    if backup_set_result.split(' ')[0] == database_result.split('-')[0]:
        log.info("Database have not happened failover")
        return True
    else:
        log.info("Database have happened failover")
        return False


def check_database_restore(meta_filepath, job_pid):
    """Check whether the database has been restored since the last backup.

    Looks up the latest backup key recorded in the meta repository and
    verifies a backup set with that tag still exists in the database; if
    none exists, a restore (or key mismatch) is assumed.

    :param meta_filepath: GaussRoach backup tool meta path
    :param job_pid: pid used to resolve the database OS user name
    :return: True when no restore is detected, False otherwise
    """
    log.info(f"Job id: {JOB_ID} check database restore")
    try:
        backup_key = ParamProtection.get_latest_backup_key(meta_filepath)
    except Exception as exception:
        # Log the exception itself so the failure cause is visible
        # (previously it was caught but never reported).
        log.error(f"Get backup_key info failed, exception: {exception}")
        return False
    if not backup_key:
        log.error("Get latest backup key failed or parameter verification fails.")
        return False
    if not check_backpy_key(backup_key):
        log.error("Check backup key fail")
        return False
    user_name = get_user_name(f"{Env.USER_NAME}_{job_pid}")
    latest_set_sql = cmd_format("select COUNT(*) from SYS_BACKUP_SETS where tag = '{}';", backup_key)
    cmd = f"""su - {user_name} -c "zsql / as sysdba -q -c \\"{latest_set_sql}\\"" """
    return_code, std_out, std_err = execute_cmd(cmd)
    # The exit status is compared as a string; "0" means success.
    if return_code != "0":
        log.error("Fail to exec cmd check restore sql!")
        return False
    result = parse_sql_result(std_out, "COUNT(*)")
    # COUNT(*) == 0 means the recorded backup set is gone from the database.
    if result.strip() == "0":
        log.info("This database has restore")
        return False
    log.info("This database not restore")
    return True


def check_increment_backup(meta_path, media_path, backup_key):
    """Check the full-backup prerequisites of an incremental backup.

    Verifies that the latest copy recorded in the roach meta is consistent
    with *backup_key*, that the copy identified by *backup_key* is a FULL
    backup, and that its media and meta directories still exist on disk.

    :param meta_path: roach meta data path
    :param media_path: roach physical backup path
    :param backup_key: backup key of the full copy this increment depends on
    :return: True when all checks pass, False otherwise
    """
    log.info(f"Job id: {JOB_ID} check increment copy")
    copyfile_media_dir = os.path.join(media_path, "roach", backup_key)
    copyfile_meta_dir = os.path.join(meta_path, "roach", backup_key)
    roach_meta = RoachMeta(meta_path)
    try:
        latest_backup_key = roach_meta.latest_backup_key()
    except Exception:
        log.error("Find last copy info failed")
        return False
    try:
        latest_copy_info = roach_meta.find_copy_info(latest_backup_key)
    except Exception:
        log.error(f"Find copy {latest_backup_key} info failed")
        return False
    if not latest_copy_info:
        log.error("Latest copy info not exit ")
        return False
    if latest_copy_info.get("BackupType") == "FULL" and latest_copy_info.get("BackupKey") != backup_key:
        log.error(f"Latest copy is not equal {backup_key}")
        return False
    # Guard this lookup as well, for consistency with the one above
    # (previously an exception here escaped to the caller's catch-all).
    try:
        copy_info = roach_meta.find_copy_info(backup_key)
    except Exception:
        log.error(f"Find copy {backup_key} info failed")
        return False
    if not copy_info:
        log.error("Copy meta info not exit ")
        return False
    if copy_info.get("BackupType") != "FULL":
        log.error(f"Copy:{backup_key} is not a full copy")
        return False
    if not os.path.exists(copyfile_media_dir):
        log.error(f"Full copy media:{copyfile_media_dir}  not exit ")
        return False
    if not os.path.exists(copyfile_meta_dir):
        log.error(f"Full copy meta:{copyfile_meta_dir}  not exit ")
        return False
    log.info(f"Job id: {JOB_ID} check increment copy success")
    return True


def get_node_num(nodes_info):
    """Count the primary and standby nodes in *nodes_info*.

    :param nodes_info: iterable of node objects exposing ``node_role``
    :return: tuple ``(primary_count, standby_count)``
    """
    roles = [int(node.node_role) for node in nodes_info]
    primary_total = sum(1 for role in roles if role == RoleType.PRIMARY)
    standby_total = sum(1 for role in roles if role == RoleType.STANDBY)
    return primary_total, standby_total


def check_env_info(backup_key, meta_path):
    """Check the environment info: cluster node count or version changes.

    Compares the current cluster (GaussDB version, node count, primary and
    standby counts) against the metadata recorded with the copy identified
    by *backup_key*.

    :param backup_key: backup key of the copy whose metadata is compared
    :param meta_path: repository meta path containing "meta/roach/<key>"
    :return: True when the environment matches the copy metadata
    """
    if not check_backpy_key(backup_key):
        log.error("Check backup key fail")
        return False
    all_nodes = GaussCluster.get_all_node()
    gdb_version = GaussCluster.get_gdb_version()
    metadata_path = os.path.join(meta_path, "meta", "roach", backup_key)
    backup_meta_info_path = os.path.join(metadata_path, "metadata.json")
    try:
        with open(backup_meta_info_path) as file_temp:
            meta_info = json.loads(file_temp.read())
    except Exception as ex:
        log.error(f"Open meta file failed error is {ex}")
        return False
    if gdb_version != meta_info.get("version"):
        log.error(f"GaussDB version have changed now is {gdb_version}")
        return False
    # Reuse get_node_num instead of duplicating its counting loop inline.
    current_primary_num, current_standby_num = get_node_num(all_nodes)
    nodes_info = meta_info.get("nodeInfo")
    if len(all_nodes) != len(nodes_info):
        log.error(f"GaussDB node number is different now is {len(all_nodes)}, copy is {len(nodes_info)}")
        return False
    # NOTE(review): these reference counts are also derived from all_nodes,
    # so they always equal current_* and the two checks below can never
    # fail; they likely should come from nodes_info (the copy metadata,
    # which holds dicts, not node objects) — TODO confirm intended source.
    primary_num, standby_num = get_node_num(all_nodes)
    if primary_num != current_primary_num:
        log.error(
            f"GaussDB primary node number is different now is {current_primary_num}, copy is {primary_num}")
        return False
    if standby_num != current_standby_num:
        log.error(
            f"GaussDB standby node number is different now is {current_standby_num}, copy is {standby_num}")
        return False
    return True


def check_diff_copy_info(roach_meta_path, roach_media_path, meta_data_path, backup_key):
    """Verify the latest copy is a valid base for a differential backup.

    The latest recorded copy must either be the full copy identified by
    *backup_key* or be the most recent differential copy in the chain, and
    its media and meta directories must still exist on disk.

    :param roach_meta_path: roach meta data path
    :param roach_media_path: roach physical backup path
    :param meta_data_path: repository meta path holding diff-chain records
    :param backup_key: backup key of the full copy this diff depends on
    :return: True when all checks pass, False otherwise
    """
    log.info(f"Job id: {JOB_ID} check diff copy")
    meta_reader = RoachMeta(roach_meta_path)
    try:
        newest_key = meta_reader.latest_backup_key()
    except Exception:
        log.error("Find last copy info failed")
        return False
    try:
        newest_copy = meta_reader.find_copy_info(newest_key)
    except Exception:
        log.error(f"Find copy {newest_key} info failed")
        return False
    if not newest_copy:
        log.error("The latest copy meta info not exit ")
        return False
    copy_type = newest_copy.get("BackupType")
    if copy_type == "FULL" and newest_copy.get("BackupKey") != backup_key:
        log.error("The latest copy is not incremental diff type base full copy")
        return False
    if copy_type == "INCREMENTAL" and newest_key != get_last_diff_backup_key(meta_data_path):
        log.error("The latest copy is not diff type base full copy")
        return False
    # Both the physical (media) and meta directories of the latest copy
    # must still be present.
    directory_checks = (
        ("media", os.path.join(roach_media_path, "roach", newest_key)),
        ("meta", os.path.join(roach_meta_path, "roach", newest_key)),
    )
    for kind, required_dir in directory_checks:
        if not os.path.exists(required_dir):
            log.error(f"diff copy {kind}:{required_dir}  not exit ")
            return False
    log.info(f"Job id: {JOB_ID} check diff copy success")
    return True


@exter_attack
def check_backup_type(repositories, backup_type, job_pid, backup_key=None):
    """Check whether the requested backup type may run (e.g. whether an
    incremental backup must be converted to a full backup).

    :param repositories: dict with "roach_meta_path" (roach meta path),
        "roach_data_path" (roach physical backup path) and "meta_data_path"
    :param backup_type: BackupTypeEnum value of the requested job
    :param job_pid: pid used to resolve the database OS user name
    :param backup_key: backup key the incremental/diff backup depends on
    :return: True when the requested backup type is allowed
    """
    log.info(f"Job id: {JOB_ID} check backup type")
    meta_path = repositories.get("roach_meta_path")
    media_path = repositories.get("roach_data_path")
    meta_data_path = repositories.get("meta_data_path")
    if not backup_type:
        log.error(f"Job id: {JOB_ID} backup type is none")
        return False
    # A full backup has no prerequisites.
    if backup_type == BackupTypeEnum.FULL_BACKUP:
        return True
    # Every non-full type needs the key of the copy it depends on.
    if not backup_key:
        log.error(f"Job id: {JOB_ID} backup key is none")
        return False
    if backup_type == BackupTypeEnum.INCRE_BACKUP:
        if not check_increment_backup(meta_path, media_path, backup_key):
            log.error(f"Job id: {JOB_ID} check increment backup is failed")
            return False
        # Incremental returns here and deliberately skips the
        # check_database_restore step performed for DIFF/LOG below.
        if check_env_info(backup_key, meta_data_path) and check_failover(job_pid):
            log.info(f"Job id: {JOB_ID} check increment backup is success")
            return True
        log.error(f"Job id: {JOB_ID} check backup type is failed")
        return False
    if backup_type == BackupTypeEnum.DIFF_BACKUP:
        if not check_diff_copy_info(meta_path, media_path, meta_data_path, backup_key):
            log.error(f"Job id: {JOB_ID} check diff backup is failed")
            return False
    if backup_type == BackupTypeEnum.LOG_BACKUP:
        # Presumably this file marks that an auto full backup is pending,
        # which forbids a log backup — TODO confirm marker semantics.
        if os.path.exists(os.path.join(meta_path, RoachConstant.AUTO_FULL_BACKUP_FILE_NAME)):
            return False
    # DIFF and LOG backups fall through to these common environment,
    # restore and failover checks.
    if check_env_info(backup_key, meta_data_path) \
            and check_database_restore(meta_data_path, job_pid) \
            and check_failover(job_pid):
        log.info(f"Job id: {JOB_ID} check diff, increment backup is success")
        return True
    log.error(f"Job id: {JOB_ID} check backup type is failed")
    return False


@exter_attack
def do_work(pid):
    """Entry point: decide whether the requested backup type may run.

    Parses the job parameter file, resolves the repository paths, runs
    check_backup_type and writes the verdict to the result file for *pid*.

    :param pid: job pid used to locate the parameter file
    :return: ExecuteResultEnum; INTERNAL_ERROR is returned only for
        failures that occur before a result file can be written
    """
    try:
        param_protection = ParamProtection(pid)
    except Exception:
        log.error("Failed to parse param file")
        return ExecuteResultEnum.INTERNAL_ERROR
    repositories_info = param_protection.get_repository_list()
    job_backup_type = param_protection.get_backup_type()
    data_repository_path = repositories_info.get("data_repository", [""])[0]
    meta_repository_path = repositories_info.get("meta_repository", [""])[0]
    # NOTE(review): a missing meta path returns INTERNAL_ERROR without
    # writing a result file, while a missing data path writes one and
    # returns SUCCESS — confirm this asymmetry is intended.
    if not meta_repository_path:
        log.error(f"Meta_repository_path: {meta_repository_path} illegal")
        return ExecuteResultEnum.INTERNAL_ERROR
    if not data_repository_path:
        log.error("Data repository path is null")
        output = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR,
                              bodyErr=ErrorCode.ERROR_INCREMENT_TO_FULL,
                              message="Data repository path is null")
        output_result_file(pid, output.dict(by_alias=True))
        return ExecuteResultEnum.SUCCESS
    local_ip = GaussCluster.get_endpoint_by_hostname()
    user_name = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    # Non-cluster deployments (or clusters with a consistent uid) keep data
    # directly under the repository root — no per-ip sub directory.
    if GaussCluster.get_deploy_type() != DeployType.CLUSTER_TYPE or check_uid_consistency(user_name, JobData.PID):
        log.info(f"Not cluster type or cluster nodes uid is consistent, do not need local ip sub dir")
        local_ip = ""
    meta_repository_path = os.path.join(meta_repository_path, local_ip)
    data_repository_path = os.path.join(data_repository_path, local_ip)
    roach_meta_path = os.path.join(meta_repository_path, "meta")
    roach_media_path = os.path.join(data_repository_path, "data", RoachConstant.ROACH_DATA)
    try:
        full_backup_key = get_full_backup_key(os.path.join(meta_repository_path, "meta"))
    except Exception as e:
        # A missing key is tolerated here; check_backup_type rejects it
        # when the requested backup type actually requires one.
        log.warning(f"Get full backup key error ex: {str(e)}")
        full_backup_key = None

    repositories = dict()
    repositories["roach_data_path"] = roach_media_path
    repositories["roach_meta_path"] = roach_meta_path
    repositories["meta_data_path"] = meta_repository_path
    try:
        ret = check_backup_type(repositories, job_backup_type, pid, full_backup_key)
    except Exception as ex:
        ret = False
        log.error(f"Check backup failed error is {ex}")
    if not ret:
        output = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR, bodyErr=ErrorCode.ERROR_INCREMENT_TO_FULL,
                              message="Can not apply this type backup job")
        output_result_file(pid, output.dict(by_alias=True))
        return ExecuteResultEnum.SUCCESS

    output = ActionResult(code=ExecuteResultEnum.SUCCESS)
    output_result_file(pid, output.dict(by_alias=True))
    return ExecuteResultEnum.SUCCESS


if __name__ == "__main__":
    # argv layout: [script, pid, job_id]; stdin carries auth/session data.
    JOB_ID = sys.argv[2]
    JobData.PID = sys.argv[1]
    SysData.SYS_STDIN = sys.stdin.readline()
    Env.USER_NAME = "job_protectObject_auth_authKey"
    log.info(f"Job id: {JOB_ID} get parse param file pid is {JobData.PID}")
    # Validate pid and job_id before doing any work.
    if not is_valid_id(JobData.PID):
        # warning() instead of the deprecated warn() alias, consistent
        # with the log.warning usage elsewhere in this file.
        log.warning('pid is invalid!')
        sys.exit(1)
    if not is_valid_id(JOB_ID):
        log.warning('job_id is invalid!')
        sys.exit(1)
    sys.exit(do_work(JobData.PID))
