#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import glob
import pwd
import grp
import os
import sqlite3

from dws.commons.log_utils import log
from common.const import SubJobStatusEnum
from dws.commons.const import RestoreProgressFileName, IntrusiveMode, PERMISSION_644, BusinessConfigType
from dws.backup.cluster_backup import ClusterBackup
from dws.commons.function_tool import log_start
from dws.commons.common import save_business_config
from common.file_common import change_path_permission, exec_lchown_dir_recursively
from common.util.exec_utils import exec_mkdir_cmd, exec_overwrite_file, su_exec_rm_cmd


class RestorePreJob:
    """Prerequisite (pre-job) phase of a DWS restore task.

    Responsibilities:
      * aggregate the per-host object-metadata sqlite files of the copy (or
        of the whole copy chain for an increment restore) into a single
        ``backupkey.db`` in the cache repository;
      * prepare the working directories required by the restore sub jobs;
      * persist the business config and filesystem-relationship info;
      * report progress to the framework through a progress file.
    """

    def __init__(self, parse_restore_param_obj, pid):
        """
        :param parse_restore_param_obj: accessor for the parsed restore
            parameters (job info, repository paths, copy ids, ...)
        :param pid: framework request id of this job
        """
        self._parse_restore_param_obj = parse_restore_param_obj
        self._main_job_id = self._parse_restore_param_obj.get_restore_param().get("job").get("jobId")
        # Bug fix: the original assigned ``pid`` here and then unconditionally
        # overwrote it with "" a few lines later, losing the caller's value.
        self._pid = pid
        self._concrete_object_db = "backupkey.db"  # temp file the object data is aggregated into
        self._progress_file = ""  # progress file path, resolved lazily in update_progress()
        self._progress = 0  # current progress percentage

    @log_start()
    def update_progress(self, status):
        """Write the pre-job progress into the progress file in the cache repo.

        :param status: SubJobStatusEnum member describing the task status
        """
        progress_dict = {
            "taskId": self._main_job_id,
            "subTaskId": "",
            "taskStatus": status.value,
            "progress": self._progress,
            "dataSize": 0,
            "speed": 0,
            "extendInfo": {}
        }
        if not self._progress_file:
            _, cache_path = self._parse_restore_param_obj.get_available_path()
            if cache_path == "":
                log.error(f"No cache path can be read or written.cache_path:{cache_path},main task:{self._main_job_id}")
                return
            tmp_dir = os.path.join(cache_path, "tmp")
            self._progress_file = os.path.join(tmp_dir,
                                               f"{RestoreProgressFileName.PREREQUISITE_PROGRESS}"
                                               f"{self._parse_restore_param_obj.get_copy_id()}")
            if not os.path.exists(tmp_dir):
                exec_mkdir_cmd(tmp_dir)
            self._progress_file = os.path.realpath(self._progress_file)
        if not exec_overwrite_file(self._progress_file, progress_dict):
            log.error(f"Write progress file failed. main job id: {self._main_job_id}")

    @log_start()
    def _aggregate_single_copy_object_data(self, cache_path, object_data_path):
        """Aggregate all object-data db files of a single copy into one db.

        :param cache_path: directory the aggregated db file is written to
        :param object_data_path: meta-repository directory holding the
            per-host object-data db files to merge
        :return: True on success, False otherwise
        """
        log.info(f"Start to merge db in {object_data_path} to {cache_path}")
        target_db = os.path.join(cache_path, self._concrete_object_db)
        tmp_path = os.path.realpath(target_db)
        if os.path.islink(target_db):
            # Remove the symlink itself, not its resolved target: the original
            # removed realpath(target_db), which left a dangling link behind
            # that sqlite3.connect() would silently follow again.
            if not su_exec_rm_cmd(target_db):
                log.warn(f"Fail to remove {tmp_path}.")

        try:
            object_conn = sqlite3.connect(target_db)
        except Exception as e:
            log.error(f"Connect sqlite {self._concrete_object_db} failed for {e}.main task:{self._main_job_id}")
            return False
        # Ensure the connection is released on every path, including the
        # early-return failures inside the merge (the original leaked it).
        try:
            object_cur = object_conn.cursor()
            if not self._merge_db_files(object_conn, object_cur, object_data_path):
                return False
            object_cur.close()
        finally:
            object_conn.close()

        if os.path.isfile(target_db):
            change_path_permission(target_db, mode=PERMISSION_644)

        # Propagate the owner of the meta repository onto the aggregated file.
        user_id, group_id = self._get_file_attribute(object_data_path)
        if not self._set_file_attribute(os.path.join(cache_path, "..", ".."), user_id, group_id):
            return False
        return True

    def _merge_db_files(self, object_conn, object_cur, object_data_path):
        """Create the target table if missing and copy every source row into it.

        :param object_conn: open connection to the aggregated target db
        :param object_cur: cursor on ``object_conn``
        :param object_data_path: directory holding the source db files
        :return: True on success, False otherwise
        """
        object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
        if not object_tables:
            object_cur.execute("CREATE TABLE [BsaObjTable] ([copyId] VARCHAR(100) NOT NULL,"
                    "[objectSpaceName] VARCHAR(1024),[objectName] VARCHAR(1024) NOT NULL,[bsaObjectOwner] VARCHAR(64),"
                    "[appObjectOwner] VARCHAR(64),[copyType] INTEGER(8),[estimatedSize] VARCHAR(100) NOT NULL,"
                    "[resourceType] VARCHAR(32),[objectType] INTEGER(8),[objectStatus] INTEGER(8),"
                    "[objectDescription] VARCHAR(100),[objectInfo] VARCHAR(256),[timestamp] VARCHAR(64),"
                    "[restoreOrder] VARCHAR(100),[storePath] VARCHAR(1280) NOT NULL,"
                    "[filesystemName] VARCHAR(256) NOT NULL,[filesystemId] VARCHAR(128) NOT NULL,"
                    "[filesystemDeviceId] VARCHAR(256) NOT NULL,[rsv1] VARCHAR(256),[rsv2] VARCHAR(256));")
        object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
        if not object_tables:
            log.error(f"Create dws table failed. main task:{self._main_job_id}")
            return False

        db_file_list = self._get_all_db_files(object_data_path)
        if not db_file_list:
            log.error("No db file")
            return False
        for db_file in db_file_list:
            src_conn = sqlite3.connect(db_file)
            # Close each source connection (the original leaked one per file).
            try:
                src_cur = src_conn.cursor()
                for row in src_cur.execute("select * from BsaObjTable").fetchall():
                    # Parameterized insert: the original built the statement via
                    # str(row).replace("None", "''") interpolated into the SQL,
                    # which corrupted any value containing the substring "None"
                    # and broke on embedded quotes.
                    values = tuple("" if item is None else item for item in row)
                    placeholders = ",".join("?" * len(values))
                    object_cur.execute(f"insert into BsaObjTable values ({placeholders})", values)
                # Commit once per source db instead of once per row.
                object_conn.commit()
            finally:
                src_conn.close()
        return True

    @log_start()
    def _prepare_dir(self, copy_id, cache_path, metadata_path):
        """Create the working directories needed by the restore (delegates to backup)."""
        return ClusterBackup.pre_job_create_res(copy_id, cache_path, metadata_path)

    def _get_all_db_files(self, object_data_path):
        """Collect every per-host db file that has to be aggregated.

        :param object_data_path: directory whose sub-directories hold *.db files
        :return: list of absolute db file paths (may be empty)
        """
        db_file_list = []
        for host_key_path in os.listdir(object_data_path):
            db_path = os.path.join(object_data_path, host_key_path)
            if not os.path.isdir(db_path) or not glob.glob(os.path.join(db_path, "*.db")):
                log.warn(f"There is no object data in metadata path {db_path}. main task:{self._main_job_id}")
                continue

            for db in os.listdir(db_path):
                # Skip the aggregation target itself and anything that is not a db.
                if db == self._concrete_object_db or not db.endswith(".db"):
                    continue
                db_file_list.append(os.path.join(db_path, db))
        return db_file_list

    def _get_file_attribute(self, file_name):
        """Return the (uid, gid) owning the given file or directory."""
        object_file_attribute = os.stat(file_name)
        # Fixed malformed log message: "user{uid}" was missing its paren.
        log.info(f"Get user({object_file_attribute.st_uid}) group({object_file_attribute.st_gid}) success. "
                 f"main task:{self._main_job_id}")
        return object_file_attribute.st_uid, object_file_attribute.st_gid

    def _set_file_attribute(self, path, user_id, group_id):
        """Recursively chown ``path`` to the user/group with the given ids.

        :return: True on success, False otherwise
        """
        try:
            user_name = pwd.getpwuid(user_id).pw_name
            group_name = grp.getgrgid(group_id).gr_name
        except KeyError:
            # Unknown uid/gid on this host: report failure through the
            # method's normal False-return contract instead of raising.
            log.error(f"Fail to get user/group name for id({user_id}/{group_id}). main task:{self._main_job_id}")
            return False
        if not exec_lchown_dir_recursively(path, user_name, group_name):
            log.error(f"Fail to change user/group for {path}. main task:{self._main_job_id}")
            return False
        return True

    def _do_common_pre_job(self):
        """Run the common prerequisite steps shared by all restore types.

        Step:
        1. Get cache and metadata path
        2. aggregate all the object data in metadata path into cache path
        3. write filesystem info, logic ip etc into cache path

        :return: True on success, False otherwise
        """
        log.info(f"Start to exec prerequisite job {self._main_job_id}.")
        copy_id = self._parse_restore_param_obj.get_copy_id()
        source_backup_id = self._parse_restore_param_obj.get_source_backup_copy_id()
        if not copy_id or not source_backup_id:
            log.error(f"Fail to get copy id({copy_id}), source: {source_backup_id}. main task:{self._main_job_id}")
            return False
        metadata_path, cache_path = self._parse_restore_param_obj.get_available_path()
        if cache_path == "" or metadata_path == "":
            log.error(f"No cache/metadata path can be read or written. cache_path: "
                      f"{cache_path}, metadata_path:{metadata_path}, main task:{self._main_job_id}")
            return False
        # Prepare the working directories.
        if not self._prepare_dir(copy_id, cache_path, metadata_path):
            log.error(f"Failed to prepare dir.")
            return False
        target_path = os.path.join(cache_path, "meta", copy_id, "objectmeta")  # aggregation target
        if not os.path.exists(target_path):
            exec_mkdir_cmd(target_path)

        self._progress = 10
        self.update_progress(SubJobStatusEnum.RUNNING)

        is_increment = self._parse_restore_param_obj.is_increment_copy()
        if is_increment:
            business_type = BusinessConfigType.INCREMENT_RESTORE_TYPE
        else:
            business_type = BusinessConfigType.FULL_RESTORE_TYPE
        save_business_config(business_type, cache_path, copy_id)
        # Merge the object data (whole copy chain for increment restore).
        if is_increment:
            ret = self._aggregate_multi_copies_object_data(target_path, metadata_path)
        else:
            object_data_path = os.path.join(metadata_path, "meta", source_backup_id, "objectmeta")
            if not os.path.exists(object_data_path):
                log.error(f"No object data path({object_data_path}) exists. main task:{self._main_job_id}")
                return False
            try:
                ret = self._aggregate_single_copy_object_data(target_path, object_data_path)
            except Exception as err:
                log.error(f"Aggregate object data failed. main task:{self._main_job_id}, err: {err}")
                return False
        if not ret:
            log.error(f"Aggregate object data failed. main task:{self._main_job_id}")
            return False
        # Non-intrusive mode needs an extra temp directory for the roach client.
        if self._parse_restore_param_obj.get_intrusive_mode() == IntrusiveMode.NON_INTRUSIVE_MODE:
            temp_path = os.path.join(f'{cache_path}', 'tmp', copy_id, 'roach_client')
            log.debug(f"Temp path is {temp_path}.")
            if not os.path.exists(temp_path):
                if not exec_mkdir_cmd(temp_path):
                    log.error(f"Fail to create {temp_path}. main task:{self._main_job_id}")
                    return False
        return True

    def _aggregate_multi_copies_object_data(self, target_path, metadata_path):
        """Aggregate the object data of every copy in the chain into one db.

        :param target_path: directory the aggregated file is written to
        :param metadata_path: mount path of the metadata repository
        :return: True on success, False otherwise
        """
        copies = self._parse_restore_param_obj.get_all_copy_info()
        # Increment restore needs a full copy plus at least one increment.
        if not copies or len(copies) <= 1:
            log.error(f"Copy info is not enough to do increment restore for {self._main_job_id}")
            return False
        for single_copy in copies:
            copy_id = single_copy.get("extendInfo", {}).get("copy_id", "")
            if not copy_id:
                log.error(f"Copy id is null. main task:{self._main_job_id}")
                return False
            object_data_path = os.path.join(metadata_path, "meta", copy_id, "objectmeta")
            # Guard missing path here too (the single-copy path already did);
            # otherwise os.listdir() raises inside the aggregation.
            if not os.path.exists(object_data_path):
                log.error(f"No object data path({object_data_path}) exists. main task:{self._main_job_id}")
                return False
            if not self._aggregate_single_copy_object_data(target_path, object_data_path):
                log.error(f"Aggregate copy {copy_id} object data failed. main task:{self._main_job_id}")
                return False
        log.info(f"Aggregate multi copies object data for {self._main_job_id}")
        return True

    def _write_fs_relationship(self):
        """Write the filesystem mapping file into the copy's replication dir.

        Only used for backup copies, replication copies and replication copies
        archived to the cloud.

        :return: True on success, False otherwise
        """
        _, cache_path = self._parse_restore_param_obj.get_available_path()
        if not cache_path:
            log.error(f"No cache_path path can be read or written."
                      f"metadata_path:{cache_path}, main task:{self._main_job_id}")
            return False
        temp_relation_path = os.path.join(cache_path, "meta",
                                          self._parse_restore_param_obj.get_copy_id(), "replication")
        if not os.path.exists(temp_relation_path):
            log.error(f"No replication dir: {temp_relation_path}")
            return False
        temp_relation_file = os.path.join(temp_relation_path, "filesystemRelationship.txt")
        # Check for a symlink BEFORE realpath(): the original resolved the
        # path first, so os.path.islink() was always False and the guard
        # against writing through a planted link was dead code.
        if os.path.islink(temp_relation_file):
            if not su_exec_rm_cmd(temp_relation_file):
                log.error(f"Fail to remove {temp_relation_file}.")
                return False
        temp_relation_file = os.path.realpath(temp_relation_file)
        # Fetch the filesystem mapping.
        relation_info = self._parse_restore_param_obj.get_fs_relation()
        log.debug(f"Relation: {relation_info}")
        if not relation_info:
            log.error(f"Fail to get relation info. main task:{self._main_job_id}")
            return False
        json_data = {"relations": relation_info}
        if not exec_overwrite_file(temp_relation_file, json_data):
            log.error(f"Fail to write relation file.")
            return False
        change_path_permission(temp_relation_file, mode=PERMISSION_644)
        return True
