#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import base64
import os

from dws.commons.common import log, save_business_config
from common.util.check_user_utils import check_path_owner
from common.util.cmd_utils import cmd_format
from dws.backup.cluster_backup import ClusterBackup
from common.common import execute_cmd
from common.const import SubJobStatusEnum
from common.env_common import get_install_head_path

from dws.commons.const import DwsRetEnum, CopyGenerateType, IntrusiveMode, PERMISSION_755, BusinessConfigType
from dws.commons.function_tool import log_start
from dws.restore.pre_job_parent import RestorePreJob
from common.file_common import change_path_permission, exec_lchown_dir_recursively
from common.util.exec_utils import exec_mkdir_cmd


class ArchiveCloudPreJob(RestorePreJob):
    """Pre sub-job for restoring a DWS copy directly from archive (cloud) storage.

    Downloads the copy metadata and object data from the archive service into
    the cache repository, aggregates the object data, and prepares the
    temporary directories needed by the restore sub-jobs that follow.
    """

    def __init__(self, parse_restore_param_obj, pid):
        super().__init__(parse_restore_param_obj, pid)
        # agentcli binary used to fetch files from the archive service.
        self._agentcli_path = f"{get_install_head_path()}/DataBackup/ProtectClient/ProtectClient-E/bin/agentcli"
        # Directory holding the aggregated object data, and where object data
        # fetched from the original copy on the archive is placed.
        self._target_objectdata_path = ""
        # Current progress percentage reported through update_progress.
        self._progress = 0

    @staticmethod
    def _build_archive_addr(archive_ip, archive_port):
        """Join the archive service IPs into an "ip1,ip2,...:port" address string."""
        return f"{','.join(archive_ip).strip(',')}:{archive_port}"

    @log_start()
    def do_pre_job(self):
        """Run the pre sub-job of a direct-from-archive restore.

        Steps:
          1. Fetch the copy metadata and object data from archive.
          2. Aggregate the object data (non-GDS copies only).
          3. Record the task information into the cache repository.

        Returns:
            bool: True on success, False on any failure.
        """
        log.info(f"Start to exec archive cloud job {self._main_job_id}.")
        # Prepare working directories inside the cache repository.
        copy_id = self._parse_restore_param_obj.get_copy_id()
        if not copy_id:
            log.error(f"Fail to get copy id({copy_id}). main task:{self._main_job_id}")
            return False
        _, cache_path = self._parse_restore_param_obj.get_available_path()
        if cache_path == "":
            log.error(f"No cache path can be read or written. cache_path: "
                      f"{cache_path}, main task:{self._main_job_id}")
            return False
        try:
            ClusterBackup.pre_job_create_res(copy_id, cache_path, "")
        except Exception as err:
            log.error(f"Fail to create path for {err}")
            return False
        self.update_progress(SubJobStatusEnum.RUNNING)
        if not self._get_copy_meta_file() or not self._get_objectdata_from_archive():
            self.update_progress(SubJobStatusEnum.FAILED)
            log.error("Can not get copy meta file or get objectdata from archive.")
            return False
        if self._parse_restore_param_obj.get_copy_generate_type() == CopyGenerateType.GENERATE_BY_GDS:
            # GDS-generated copies need no aggregation; the pre job is done.
            self._progress = 100
            self.update_progress(SubJobStatusEnum.COMPLETED)
            return True
        self._progress = 10

        # Persist the restore type (full/increment) to business_config.txt.
        if self._parse_restore_param_obj.is_increment_copy():
            business_type = BusinessConfigType.INCREMENT_RESTORE_TYPE
        else:
            business_type = BusinessConfigType.FULL_RESTORE_TYPE
        save_business_config(business_type, cache_path, copy_id)

        if not self._aggregate_single_copy_object_data(self._target_objectdata_path, self._target_objectdata_path):
            log.error(f"Aggregate object data failed. main task:{self._main_job_id}")
            self.update_progress(SubJobStatusEnum.FAILED)
            return False
        # Non-intrusive mode needs a temporary roach_client directory.
        # copy_id and cache_path were already fetched and validated above.
        if self._parse_restore_param_obj.get_intrusive_mode() == IntrusiveMode.NON_INTRUSIVE_MODE:
            temp_path = os.path.join(f'{cache_path}', 'tmp', copy_id, 'roach_client')
            if not os.path.exists(temp_path):
                exec_mkdir_cmd(temp_path)
        self._progress = 100
        # Best effort: failing to write the mapping file is logged, not fatal.
        if not self._write_fs_relationship():
            log.warn(f"Fail to write relationship file. main task:{self._main_job_id}")
        self.update_progress(SubJobStatusEnum.COMPLETED)
        return True

    @log_start()
    def _get_copy_meta_file(self):
        """Download copymetadata.sqlite from archive.

        Only applies to copies backed up via Roach; GDS-generated copies are
        skipped (returns True immediately).

        Returns:
            bool: True when the sqlite metadata was downloaded (or skipped).
        """
        if self._parse_restore_param_obj.get_copy_generate_type() == CopyGenerateType.GENERATE_BY_GDS:
            return True
        # 1.1 Copy id of the copy being restored.
        copy_id = self._parse_restore_param_obj.get_copy_id()
        # 1.2 Archive service address.
        archive_ip, archive_port = self._parse_restore_param_obj.get_archive_ip_port()
        archive_addr = self._build_archive_addr(archive_ip, archive_port)
        log.debug(f"Archive address : {archive_addr}")
        # 1.3 Local path that the archive data is written to.
        _, cache_path = self._parse_restore_param_obj.get_available_path()
        self._target_objectdata_path = os.path.join(cache_path, "meta", copy_id, "sqlite")
        if not os.path.exists(self._target_objectdata_path):
            exec_mkdir_cmd(self._target_objectdata_path)
        if not exec_lchown_dir_recursively(self._target_objectdata_path, "rdadmin", "rdadmin"):
            log.error(f"Fail to change user/group for {self._target_objectdata_path}.")
            return False
        temp_meta_prefix = self._parse_restore_param_obj.get_archive_meta_prefix()
        # Replace the trailing "objectmeta..." part of the prefix with "sqlite".
        meta_repo_path = os.path.join(temp_meta_prefix[0:temp_meta_prefix.find("objectmeta")], "sqlite")
        ret = self._download_repo_path_from_archive(meta_repo_path,
                                                    os.path.join(cache_path, "meta"), copy_id, archive_addr)
        if not ret:
            log.error("Download copy meta data from archive failed")
            return False
        log.info("Download copy meta data from archive successfully.")
        return True

    @log_start()
    def _get_objectdata_from_archive(self):
        """Download the object metadata (and, for GDS copies, data) from archive.

        Returns:
            bool: True when every required repository path was downloaded.
        """
        # 1.1 Copy id of the copy being restored.
        copy_id = self._parse_restore_param_obj.get_copy_id()
        if not copy_id:
            log.error(f"Fail to get copy id. main task:{self._main_job_id}")
            return False
        # 1.2 Archive service address.
        archive_ip, archive_port = self._parse_restore_param_obj.get_archive_ip_port()
        if not archive_ip or not archive_port:
            log.error("Fail to get archive_ip or archive port.")
            return False
        archive_addr = self._build_archive_addr(archive_ip, archive_port)
        log.debug(f"Archive address : {archive_addr}")
        # 1.3 Local path that the archive data is written to.
        _, cache_path = self._parse_restore_param_obj.get_available_path()
        if cache_path == "":
            log.error(f"No cache path can be read or written. cache_path: {cache_path}, "
                      f"main task:{self._main_job_id}")
            return False
        self._target_objectdata_path = os.path.join(cache_path, "meta", copy_id, "objectmeta")
        if not os.path.exists(self._target_objectdata_path):
            exec_mkdir_cmd(self._target_objectdata_path)
        temp_dir = os.path.join(cache_path, "meta")
        if not exec_lchown_dir_recursively(temp_dir, "rdadmin", "rdadmin"):
            log.error(f"Fail to change user/group for {temp_dir}. main task:{self._main_job_id}")
            return False
        # restore_object: relative path the archive uses to locate object data.
        meta_repo_path = self._parse_restore_param_obj.get_archive_meta_prefix()
        ret = self._download_repo_path_from_archive(meta_repo_path, temp_dir, copy_id, archive_addr)
        if not ret:
            log.error("Download meta repo data from archive failed")
            return False
        if self._parse_restore_param_obj.get_copy_generate_type() == CopyGenerateType.GENERATE_BY_GDS:
            # GDS copies additionally need the data repository downloaded.
            self._target_objectdata_path = os.path.join(cache_path, "data", copy_id)
            if not os.path.exists(self._target_objectdata_path):
                exec_mkdir_cmd(self._target_objectdata_path)
            temp_dir = os.path.join(cache_path, "data")
            if not exec_lchown_dir_recursively(temp_dir, "rdadmin", "rdadmin"):
                log.error(f"Fail to change user/group for {temp_dir}. main task:{self._main_job_id}")
                return False
            data_repo_path = self._parse_restore_param_obj.get_archive_data_prefix()
            ret = self._download_repo_path_from_archive(data_repo_path, temp_dir, copy_id, archive_addr)
            if not ret:
                log.error("Download data repo data from archive failed")
                return False
        return True

    @log_start()
    def _download_repo_path_from_archive(self, repo_path, temp_dir, copy_id, archive_addr):
        """Fetch one repository path from the archive via agentcli.

        Args:
            repo_path: relative path on the archive to download.
            temp_dir: local target directory. NOTE(review): not referenced by
                the command below, which writes to self._target_objectdata_path
                instead — kept for interface compatibility; confirm intent.
            copy_id: id of the copy being restored.
            archive_addr: "ip1,ip2,...:port" address of the archive service.

        Returns:
            bool: True when the agentcli command succeeded.
        """
        restore_object = repo_path
        if not restore_object:
            log.error(f"Fail to get prefix. main task:{self._main_job_id}")
            return False
        restore_object = f"/{restore_object}/"
        log.debug(f"The archive search path is {restore_object}")
        # agentcli expects the search path base64-encoded.
        try:
            restore_object = base64.b64encode(restore_object.encode(encoding='utf-8', errors='strict'))
        except Exception as e:
            log.error(f"Fail to transfer {restore_object} to bytes for {e}. main task:{self._main_job_id}")
            return False
        restore_object = restore_object.decode('utf-8')
        # Loosen the resource-id directory permission so the download can write.
        _, cache_path = self._parse_restore_param_obj.get_available_path()
        if cache_path == "":
            log.error(f"No cache path can be read or written. cache_path: {cache_path}, "
                      f"main task:{self._main_job_id}")
            return False
        change_path_permission(os.path.dirname(cache_path), mode=PERMISSION_755)
        # 1.4 Ownership checks before running the fetch command as rdadmin.
        if not check_path_owner(self._agentcli_path, ["rdadmin", "root"]):
            log.error(f"Check path owner failed: {self._agentcli_path}")
            return False
        if not check_path_owner(self._target_objectdata_path, ["rdadmin", "root"]):
            log.error(f"Check path owner failed: {self._target_objectdata_path}")
            return False
        get_cmd = cmd_format("su - rdadmin -s /bin/sh -c '{} GetFileFromArchive {} {} {} {}'",
                             self._agentcli_path, copy_id, archive_addr,
                             self._target_objectdata_path, restore_object)
        ret_code, output, err_str = execute_cmd(get_cmd)
        if ret_code != DwsRetEnum.SUCCESS:
            log.error(f"Get object data from archive failed. cmd:{get_cmd}, output: {output}, error:{err_str}. "
                      f"main task:{self._main_job_id}")
            return False
        log.debug(f"Execute cmd {get_cmd} success. output: {output}")
        log.info("Successfully download repo path from archive.")
        return True

    def _get_all_db_files(self, object_data_path):
        """Return paths of every .db file under object_data_path to aggregate.

        The concrete-object db (self._concrete_object_db, set by the parent
        class — presumably the aggregation target; verify against caller) is
        excluded from the result.
        """
        return [
            os.path.join(object_data_path, db_name)
            for db_name in os.listdir(object_data_path)
            if db_name.endswith(".db") and db_name != self._concrete_object_db
        ]
