#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import json
import os
import sqlite3
import time
import xml.dom.minidom

from dws.commons.common import log, is_intrusive
from common.util.cmd_utils import cmd_format
from common.file_common import change_path_permission, exec_lchown
from common.util.exec_utils import exec_mkdir_cmd, exec_overwrite_file, exec_mv_cmd, su_exec_rm_cmd
from common.common import execute_cmd, report_job_details
from common.common_models import LogDetail, SubJobDetails
from common.const import DBLogLevel, SubJobStatusEnum
from common.logger import process_string, SENSITIVE_WORDS
from common.env_common import get_install_head_path
from dws.commons.const import DwsRetEnum, IntrusiveMode, PERMISSION_644, PERMISSION_755, DwsBackupLabel
from dws.commons.function_tool import log_start
from dws.commons.dws_exception import ErrCodeException
from dws.commons.error_code import DwsErrorCode
from dws.commons.job_info import JobInfo
from dws.commons.progress_notify import write_file
from dws.resource.dws_common import DwsCommon

# Agent-side configuration file; parsed for the Backup/agent_storage_relation
# setting that maps agents to storages for source-side dedup distribution.
AGENT_CFG_FILE = f"{get_install_head_path()}/DataBackup/ProtectClient/ProtectClient-E/conf/agent_cfg.xml"


class RepoPayload:
    """Per-storage (ESN) load bookkeeping used when assigning filesystems."""

    def __init__(self):
        # fsId -> fsName mapping for the filesystems on this storage.
        self.fs_info = {}
        # Use count per filesystem on this storage, keyed by fsId.
        self.fs_payload = {}
        # fs_payload items sorted ascending by use count.
        self.sorted_fs_payload = []
        # Total number of times this storage was used.
        self.used_count = 0


class DistributeDwsNodes:
    """Facade that validates the agent config and dispatches node
    distribution to the matching strategy class."""

    def __init__(self, job_info: JobInfo):
        self._job_info = job_info

    @log_start()
    def check_source_delete_cfg(self, host_ip):
        """Validate the local agent config for source-side dedup and record
        the per-host result.

        Writes {"configured": bool} to
        <cache>/tmp/<copy_id>/check_agent_cfg/<host_ip>.txt.
        Returns False only when the result file cannot be written.
        """
        intrusive = is_intrusive(self._job_info.nodes) == IntrusiveMode.INTRUSIVE_MODE
        if intrusive or not self._job_info.open_source_delete:
            return True
        config_dom = xml.dom.minidom.parse(AGENT_CFG_FILE)
        is_configured = self._check_param(config_dom)
        if not is_configured:
            log.error("Parm illegal.")
        result_dir = os.path.join(self._job_info.cache_path, 'tmp', self._job_info.copy_id, 'check_agent_cfg')
        if not os.path.exists(result_dir):
            exec_mkdir_cmd(result_dir)
        try:
            write_file(os.path.join(result_dir, f'{host_ip}.txt'), json.dumps({"configured": is_configured}))
        except Exception:
            log.error("Write config file failed.")
            return False
        return True

    @log_start()
    def _check_param(self, dom):
        """
        Check that the parsed agent config has a "Backup" section containing
        an agent_storage_relation element with a non-empty value.
        :param dom: parsed agent configuration document
        :return: True or False
        """
        if not dom:
            log.error("Analyze agent configuration failed.")
            return False
        root = dom.documentElement
        backup_item = next((node for node in root.childNodes if node.nodeName == "Backup"), None)
        if not backup_item:
            log.error("There is not backup section.")
            return False
        relation_elems = backup_item.getElementsByTagName('agent_storage_relation')
        if not relation_elems:
            log.error("There is not agent_storage_relation")
            return False
        if not relation_elems[0].getAttribute('value'):
            log.error("Agent and storage relation is null.")
            return False
        return True

    def distribute_dws_nodes(self):
        """Pick the distribution strategy from intrusive mode and the
        source-delete switch, then run it."""
        log.info(f"Open source delete: {self._job_info.open_source_delete}")
        use_source_delete = (is_intrusive(self._job_info.nodes) == IntrusiveMode.NON_INTRUSIVE_MODE
                             and self._job_info.open_source_delete)
        strategy_cls = SourceDeleteStrategy if use_source_delete else NonSourceDeleteStrategy
        return strategy_cls(self._job_info).distribute_dws_nodes()


class DistributeParent:
    """Common base for the DWS node distribution strategies.

    Maintains the sqlite mapping (dwsHosts.db) between DWS node hostnames and
    the backup filesystems they use, plus the repo/load bookkeeping shared by
    the source-delete and non-source-delete strategies.
    """

    def __init__(self, job_info: JobInfo):
        self._job_info = job_info
        self._db_name = ""  # absolute path of dwsHosts.db, set by _create_host_db()
        self._usable_repo = list()  # tuples of (storage ESN, fsId, fsName)
        self._cluster_nodes_hostname = list()
        self._last_host_list = list()  # DWS node list of the previous backup
        self._last_repo_list = list()  # tuples of (storage ESN, fsId, fsName)
        self._repo_payload = dict()  # load after the last backup: use count per filesystem
        self._shrink_host = set()  # DWS nodes removed (shrunk) since the last backup
        self._wait_distribute_nodes = set()  # DWS nodes still waiting for a filesystem
        self._table_name = "DwsHostFilesystemTable"

    @staticmethod
    @log_start()
    def record_backup_info(meta_path, input_str: dict):
        """
        Write this backup's info into the meta repository.
        :param meta_path: meta repository directory
        :param input_str: data to write (serialized by exec_overwrite_file)
        :return: True on success, False when no meta path is available
        """
        if not meta_path:
            log.error(f"No usable meta path.")
            return False
        backup_info_file = os.path.join(meta_path, "meta", "lastBackupInfo.txt")
        backup_info_file = os.path.realpath(backup_info_file)
        if os.path.exists(backup_info_file):
            if not su_exec_rm_cmd(backup_info_file):
                log.warn(f"Fail to remove {backup_info_file}.")
        exec_overwrite_file(backup_info_file, input_str)

        #  Restrict permission on the freshly written file
        change_path_permission(backup_info_file, mode=PERMISSION_644)
        log.info(f"Write {backup_info_file} with info({input_str}) successfully.")
        return True

    @staticmethod
    @log_start()
    def read_backup_info(meta_path):
        """
        Read the info recorded by the previous backup.
        :param meta_path: meta repository directory
        :return: dict parsed from lastBackupInfo.txt; defaults to
                 {"last_open_source_delete": False} when the file is missing
        """
        ret = {"last_open_source_delete": False}
        if not meta_path:
            log.error(f"No usable meta path.")
            return ret
        backup_info_file = os.path.join(meta_path, "meta", "lastBackupInfo.txt")
        if not os.path.isfile(backup_info_file):
            log.warn(f"The {backup_info_file} not exists.")
            return ret
        with open(backup_info_file, "r") as tmp_fo:
            lines = tmp_fo.readlines()
            if len(lines) > 0:
                ret = json.loads(lines[0])
        log.info(f"Read {ret}")
        return ret

    @staticmethod
    @log_start()
    def is_sub_list(source_list, target_list):
        """Return True when every element of source_list appears in target_list
        (set semantics: order and duplicates ignored)."""
        temp_source_set = set(source_list)
        temp_target_set = set(target_list)
        return temp_source_set.issubset(temp_target_set)

    @staticmethod
    @log_start()
    def is_same_list(list_a, list_b):
        """Return True when the two lists contain the same set of elements
        (order and duplicates ignored)."""
        temp_set_a = set(list_a)
        temp_set_b = set(list_b)
        return temp_set_a.issubset(temp_set_b) and temp_set_b.issubset(temp_set_a)

    @staticmethod
    @log_start()
    def check_roach_port(ip_port_path, hosts_ip):
        """
        Collect agent IPs from the roach result files (named <IP>.txt) under
        ip_port_path.
        :return: (True, ip_list) on success; (False, []) on a malformed name
        :raises ErrCodeException: ROACH_CLIENT_PROCESS_NOT_EXISTS when no
                result file exists at all
        """
        ip_info = []
        for file in os.listdir(ip_port_path):
            file_name_min_len = 11  # shortest IP (7 chars) + ".txt"
            if len(file) < file_name_min_len:
                log.error(f"File name {file} is not correct.")
                return False, ip_info
            ip_info.append(file[:-4])
        if not ip_info:
            raise ErrCodeException(
                log_detail=LogDetail(
                    logDetail=DwsErrorCode.ROACH_CLIENT_PROCESS_NOT_EXISTS,
                    logDetailParam=[",".join(hosts_ip)],
                    logTimestamp=int(time.time()),
                    logLevel=DBLogLevel.ERROR))

        return True, ip_info

    @log_start()
    def _get_last_used_repo(self):
        """
        Load the previous backup's host/filesystem mapping from dwsHosts.db
        and refresh _last_host_list/_last_repo_list/_repo_payload/_shrink_host.
        Failures only log and leave the members unchanged.
        """
        last_host_list = list()
        last_repo_list = list()  # tuples of (storage ESN, fsId, fsName)
        repo_payload = dict()
        shrink_host = set()
        if not os.path.isfile(self._db_name):
            log.error(f"No db file exists.")
            return
        try:
            object_conn = sqlite3.connect(self._db_name)
        except Exception as e:
            log.error(f"Connect sqlite {self._db_name} failed for {e}")
            return
        object_cur = object_conn.cursor()
        tmp_cmd = cmd_format("select * from {}", self._table_name)
        all_lines = object_cur.execute(tmp_cmd).fetchall()
        for single_line in all_lines:
            log.info(f"Single record: {single_line}")
            table_column_num = 5
            if len(single_line) != table_column_num:
                log.error(f"The record({single_line}) is illegal.")
                return
            last_host_list.append(single_line[0])
            if single_line[0] not in self._cluster_nodes_hostname:
                # A node shrunk out of the cluster does not count towards load
                log.info(f"{single_line[0]} is shrunk")
                shrink_host.add(single_line[0])
                continue
            # Row layout is (hostname, fsName, fsId, esn, rsv1) per the table
            # created in _create_host_db -> repo key (esn, fsId, fsName)
            temp_record = (single_line[3], single_line[2], single_line[1])
            if temp_record in repo_payload.keys():
                repo_payload[temp_record] += 1
            else:
                repo_payload[temp_record] = 1
            if temp_record in last_repo_list:
                continue
            last_repo_list.append(temp_record)
        log.info(f"Last backup host: {last_host_list}; Last repo: {last_repo_list}, : {repo_payload}")
        self._last_host_list = last_host_list
        self._last_repo_list = last_repo_list
        self._repo_payload = repo_payload
        self._shrink_host = shrink_host

    @log_start()
    def _get_usable_repo(self):
        """
        Collect the usable storage repos from the job's data repositories.
        Repos without capacityAvailable are reported as a warning; when no
        repo is marked available, all repos are used as a fallback.
        """
        not_available_repos = []
        for repo in self._job_info.data_reps:
            capacity_ava = repo.get("extendInfo", {}).get("capacityAvailable", "")
            if not capacity_ava:
                log.warn(f"Repo: {repo} is unavailable.")
                not_available_repos.append(repo)
                continue
            log.info(f"Repo: {repo} is available.")
            self._insert_usable_repo(repo)
        if not_available_repos:
            self.report_not_available_repos(not_available_repos)
        if not self._usable_repo:
            log.info(f"No repo is available, use all repo.")
            for repo in self._job_info.data_reps:
                self._insert_usable_repo(repo)
        log.info(f"Usable repo: {self._usable_repo}")

    def report_not_available_repos(self, repos):
        """
        Report a WARN job detail listing the unavailable filesystems.
        :param repos: repo dicts lacking available capacity
        """
        report_esn = [repo.get("extendInfo", {}).get("esn", "") for repo in repos]
        report_fs_id = [repo.get("extendInfo", {}).get("fsId", "") for repo in repos]
        log_detail = LogDetail(
            logInfo=DwsBackupLabel.DWS_BACKUP_STORAGE_REPO_NOT_AVAILABLE_LABEL,
            logInfoParam=[",".join(report_esn), ",".join(report_fs_id)],
            logLevel=DBLogLevel.WARN
        )
        report_job_details(
            self._job_info.pid,
            SubJobDetails(taskId=self._job_info.job_id, subTaskId=self._job_info.sub_job_id, progress=10,
                          logDetail=[log_detail], taskStatus=SubJobStatusEnum.RUNNING))

    @log_start()
    def _clear_db(self):
        """
        Remove every record from dwsHosts.db and reset the cached payload.
        :return: True when the delete statement succeeded
        """
        tmp_cmd = cmd_format("delete from {}", self._table_name)
        ret, _ = self._execute_sqlite_cmd(tmp_cmd)
        self._repo_payload = {}
        return ret

    @log_start()
    def _create_host_db(self):
        """
        Create the dwsHosts.db file and its table if they do not exist yet.
        :return: True on success, False on any failure
        """
        if not self._job_info.meta_path:
            log.error(f"No usable meta path.")
            return False
        self._db_name = os.path.join(self._job_info.meta_path, "meta", "dwsHosts.db")
        self._db_name = os.path.realpath(self._db_name)
        if os.path.islink(self._db_name):
            log.warn(f"This is a link file, remove it.")
            if not su_exec_rm_cmd(self._db_name):
                log.warn(f"Fail to remove {self._db_name}.")
        if os.path.isfile(self._db_name):
            log.info(f"Db {self._db_name} file exists.")
            return True
        try:
            object_conn = sqlite3.connect(self._db_name)
        except Exception as e:
            log.error(f"Connect sqlite {self._db_name} failed for {e}")
            return False
        if not object_conn:
            log.error(f"Connect sqlite {self._db_name} failed.")
            return False
        object_cur = object_conn.cursor()
        if not object_cur:
            log.error(f"Connect sqlite {self._db_name} failed.")
            return False
        object_cur.execute("CREATE TABLE IF NOT EXISTS [DwsHostFilesystemTable] ("
                           "[hostname] VARCHAR(256) NOT NULL PRIMARY KEY,"
                           "[filesystemName] VARCHAR(256) NOT NULL,"
                           "[filesystemId] VARCHAR(128) NOT NULL,"
                           "[filesystemDeviceId] VARCHAR(256) NOT NULL,"
                           "[rsv1] VARCHAR(256));")
        object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
        if not object_tables:
            log.error(f"Create dws table failed.")
            return False
        #  Adjust permissions/ownership so the rdadmin account can use the db
        change_path_permission(self._db_name, mode=PERMISSION_644)
        change_path_permission(os.path.join(self._job_info.meta_path, "meta"), mode=PERMISSION_755)
        if not exec_lchown(self._db_name, "root", "rdadmin"):
            log.error(f"Change owner for {self._db_name} failed.")
            object_cur.close()
            object_conn.close()
            return False
        log.info(f"Create db({self._db_name}) successfully.")
        return True

    def _calculate_current_payload(self):
        """Compute the per-storage load for this backup and return it sorted
        by use count ascending, e.g. [("esn", RepoPayload), ...]."""
        # Repo tuple format: (storage ESN, fsId, fsName)
        repo_payload = {}  # key: esn
        for record in self._usable_repo:
            if record[0] not in repo_payload.keys():
                repo_payload[record[0]] = RepoPayload()
            if record in self._repo_payload.keys():
                repo_payload[record[0]].used_count += self._repo_payload[record]
                repo_payload[record[0]].fs_payload[record[1]] = self._repo_payload[record]
            else:
                repo_payload[record[0]].fs_payload[record[1]] = 0
            repo_payload[record[0]].fs_info[record[1]] = record[2]
        log.info(f"Before sort repo list: {repo_payload}")
        for single_repo in repo_payload.values():
            single_repo.sorted_fs_payload = sorted(single_repo.fs_payload.items(), key=lambda x: x[1], reverse=False)
        repo_payload = sorted(repo_payload.items(), key=lambda x: x[1].used_count, reverse=False)
        # Example repo_payload value: [("esn", RepoPayload)]
        log.info(f"After sort repo list: {repo_payload}")
        return repo_payload

    def _remove_unused_repo(self):
        """
        Drop the storages that were used last backup but are unusable now and
        mark the DWS nodes bound to them for re-distribution.
        :return: True on success
        """
        removed_repo = []
        for repo in self._last_repo_list:
            if repo not in self._usable_repo:
                removed_repo.append(repo)
        log.info(f"Removed repo: {removed_repo}")
        for repo in removed_repo:
            # Every DWS node bound to this storage must be re-distributed
            query_cmd = cmd_format("select * from {} where filesystemId='{}'", self._table_name, repo[1])
            ret, used_record = self._execute_sqlite_cmd(query_cmd)
            for record in used_record:
                if len(record) != 5:
                    log.error(f"The record({record}) is invalid.")
                    return False
                self._wait_distribute_nodes.add(record[0])
            delete_cmd = cmd_format("delete from {} where filesystemId='{}'", self._table_name, repo[1])
            ret, _ = self._execute_sqlite_cmd(delete_cmd)
            if not ret:
                log.error(f"Delete record for {repo[1]} failed.")
                return False
        self._get_last_used_repo()
        log.info(f"Wait for distribute nodes are: {self._wait_distribute_nodes}")
        return True

    def _do_common_part(self):
        """Shared preparation: create the db, read the cluster nodes, load
        last backup's repo usage and this backup's usable repos.
        :return: True on success
        """
        # Create the host/filesystem relation database file
        if not self._create_host_db():
            return False
        self._cluster_nodes_hostname = \
            DwsCommon(self._job_info.usr, self._job_info.env_path).filter_cluster_node_info("nodeName")
        log.info(f"Cluster name: {self._cluster_nodes_hostname}")
        if not self._cluster_nodes_hostname:
            log.error("Fail to get current cluster nodes.")
            return False
        # Storage usage details of the previous backup
        self._get_last_used_repo()
        # Storages usable for this backup
        self._get_usable_repo()
        return True

    def _insert_usable_repo(self, repo):
        """Append (esn, fsId, fsName) parsed from the repo dict to
        _usable_repo; repos missing any field are skipped with a warning."""
        esn = repo.get("extendInfo", {}).get("esn", "")
        fs_id = repo.get("extendInfo", {}).get("fsId", "")
        fs_name = repo.get("remotePath", "").strip("/")
        if not esn or not fs_id or not fs_name:
            log.warn(f"Repo: {repo} is unavailable.")
            return
        self._usable_repo.append((esn, fs_id, fs_name))

    def _backup_dbfile(self):
        """
        Back up dwsHosts.db before modifying it.
        :return: True on success
        """
        temp_db_name = os.path.join(self._job_info.meta_path, "meta", "dwsHosts_bk.db")
        ret, output, err = execute_cmd(f"cp {self._db_name} {temp_db_name}")
        if ret != DwsRetEnum.SUCCESS:
            log.error(f"Backup db file failed. output:{output}, err:{err}")
            return False
        log.info(f"Backup db file success.")
        return True

    def _recover_dbfile(self):
        """
        Restore dwsHosts.db from the backup copy after a failed modification.
        """
        temp_db_name = os.path.join(self._job_info.meta_path, "meta", "dwsHosts_bk.db")
        if not os.path.isfile(temp_db_name):
            log.error(f"No backup db file.")
            return
        self._db_name = os.path.realpath(self._db_name)
        if os.path.isfile(self._db_name):
            if not su_exec_rm_cmd(self._db_name):
                log.warn(f"Fail to remove {self._db_name}.")
        if not exec_mv_cmd(temp_db_name, self._db_name, check_white_black_list_flag=False):
            log.error(f"Fail to move db file.")
            return

        exec_lchown(self._db_name, "root", "rdadmin")

    def _execute_sqlite_cmd(self, cmd):
        """
        Execute the given sql statement against dwsHosts.db.
        :param cmd: sql statement
        :return: (success flag, fetched rows)
        """
        try:
            object_conn = sqlite3.connect(self._db_name)
        except Exception as e:
            log.error(f"Connect sqlite {self._db_name} failed for {e}")
            return False, []
        if not object_conn:
            log.error(f"Connect sqlite {self._db_name} failed.")
            return False, []
        object_cur = object_conn.cursor()
        if not object_cur:
            log.error(f"Connect sqlite {self._db_name} failed.")
            return False, []
        try:
            ret = object_cur.execute(cmd).fetchall()
        except Exception as e:
            log.error(f"Execute cmd failed for {e}.")
            object_cur.close()
            object_conn.close()
            return False, []
        object_conn.commit()
        object_cur.close()
        object_conn.close()
        return True, ret


# Distribution strategy used when source-side deduplication (source delete) is enabled
class SourceDeleteStrategy(DistributeParent):
    """Distribution strategy for source-side deduplication: DWS nodes are
    bound to the storage configured for their agent in agent_cfg.xml."""

    def __init__(self, job_info: JobInfo):
        super().__init__(job_info)
        self._usable_agents = list()  # agents usable for this backup
        self._agent_repo_relation = dict()  # agent host -> storage esn mapping
        self._dws_storage_relation = dict()  # DWS node assignment; key: dws hostname, value: storage esn

    @log_start()
    def distribute_dws_nodes(self):
        """
        Assign a filesystem to every DWS node; return True/False.
        """
        # Run the common preparation steps
        if not self._do_common_part():
            return False
        # If the previous backup did not distribute with source delete,
        # start from scratch
        last_backup_info = DistributeParent.read_backup_info(self._job_info.meta_path)
        if not last_backup_info.get("last_open_source_delete"):
            log.info("Last backup didn't use source delete.")
            return self._distribute_nodes(self._cluster_nodes_hostname, True)
        # If the DWS cluster scaled in/out, everything must be re-assigned
        if self._cluster_nodes_hostname != self._last_host_list:
            log.info("Dws nodes has changed.")
            return self._distribute_nodes(self._cluster_nodes_hostname, True)
        # Compare the storage clusters used by the two backups
        if DistributeParent.is_same_list(self._usable_repo, self._last_repo_list):
            # Same as last time: keep the existing assignment
            log.info("Repo not change.")
            return True
        elif DistributeParent.is_sub_list(self._usable_repo, self._last_repo_list):
            # Subset of last time: drop the vanished storages
            log.info("Repo shrink.")
            if not self._remove_unused_repo():
                return False
        else:
            # A storage not used last time appears: full re-assignment
            log.info("Exist different storage.")
            return self._distribute_nodes(self._cluster_nodes_hostname, True)
        log.info("Distribute rest nodes.")
        return self._distribute_nodes(self._wait_distribute_nodes, False)

    @log_start()
    def _check_param(self):
        """
        Verify every agent reported a configured mapping in the
        check_agent_cfg results.
        :raises ErrCodeException: NO_SOURCE_DELETE_CFG listing the
                unconfigured agents
        """
        result_path = os.path.join(self._job_info.cache_path, 'tmp', self._job_info.copy_id, 'check_agent_cfg')
        unconfig_agent = []
        for file in os.listdir(result_path):
            with open(os.path.join(result_path, file), "r") as tmp_fo:
                lines = tmp_fo.readlines()
                if len(lines) <= 0:
                    unconfig_agent.append(file[:-4])  # strip the trailing .txt
                    continue
                content = json.loads(lines[0])
                if not content.get("configured"):
                    unconfig_agent.append(file[:-4])
        if unconfig_agent:
            raise ErrCodeException(
                log_detail=LogDetail(logDetail=DwsErrorCode.NO_SOURCE_DELETE_CFG,
                                     logDetailParam=[",".join(unconfig_agent)],
                                     logTimestamp=int(time.time()), logLevel=DBLogLevel.ERROR))

    @log_start()
    def _distribute_nodes(self, nodes_list, is_clear_db):
        """
        Completely re-assign the DWS node / filesystem relation.
        nodes_list: DWS nodes to assign
        is_clear_db: whether to clear the existing database first (True: clear)
        :return: True on success
        """
        log.info(f"Nodes_list: {nodes_list}")
        if not self._backup_dbfile():
            return False
        if is_clear_db and not self._clear_db():
            self._recover_dbfile()
            return False
        if not self._get_dws_storage_relation():
            return False
        repo_payload = self._calculate_current_payload()
        if not repo_payload:
            log.error(f"Fail to calculate current payload.")
            return False
        for node in nodes_list:
            # Repo tuple format: (storage ESN, fsId, fsName)
            for repo in repo_payload:
                if repo[0] != self._dws_storage_relation.get(node):
                    continue
                # Take the least-loaded filesystem and rotate it to the back
                single_fs = repo[1].sorted_fs_payload.pop(0)
                temp_fs_id = single_fs[0]
                temp_fs_name = repo[1].fs_info[temp_fs_id]
                temp_esn = repo[0]
                repo[1].sorted_fs_payload.append(single_fs)
                cmd = f"insert into {self._table_name} values(\"{node}\",\"{temp_fs_name}\"," \
                      f"\"{temp_fs_id}\",\"{temp_esn}\",\"\")"
                log.info(f"Insert values: {self._table_name}, {node}, {temp_fs_name}, {temp_fs_id}, {temp_esn}")
                ret, _ = self._execute_sqlite_cmd(cmd)
                if not ret:
                    self._recover_dbfile()
                    return False
        temp_db_name = os.path.join(self._job_info.meta_path, "meta", "dwsHosts_bk.db")
        temp_db_name = os.path.realpath(temp_db_name)
        if os.path.isfile(temp_db_name):
            if not su_exec_rm_cmd(temp_db_name):
                log.warn(f"Fail to remove {temp_db_name}.")
        self._db_name = os.path.realpath(self._db_name)
        if not exec_lchown(self._db_name, "root", "rdadmin"):
            log.error(f"Change owner failed.")
            return False
        self.record_backup_info(self._job_info.meta_path, {"last_open_source_delete": True})
        return True

    @log_start()
    def _get_agent_repo_relation(self):
        """
        1. Check via the whitelist sub-job results that every host configured
           a mapping (raises through _check_param otherwise).
        2. Read the agent -> storage mapping from agent_cfg.xml and
           balance-assign a storage to any usable agent without one.
        :return: True or False, whether the mapping was obtained
        """
        self._check_param()
        dom = xml.dom.minidom.parse(AGENT_CFG_FILE)
        backup_item = None
        for item in dom.documentElement.childNodes:
            if item.nodeName == "Backup":
                backup_item = item
                break
        relation = backup_item.getElementsByTagName('agent_storage_relation')[0].getAttribute('value')
        # Collect the usable agents that have no storage bound yet
        unconfig_agent = list()
        repo_payload = sorted(self._get_storage_payload(relation).items(), key=lambda x: x[1], reverse=False)
        for agent in self._usable_agents:
            if agent not in self._agent_repo_relation.keys():
                unconfig_agent.append(agent)
        # Round-robin the unconfigured agents over the storages,
        # least-loaded first
        index = 0
        for agent in unconfig_agent:
            if index >= len(repo_payload):
                index = 0
            self._agent_repo_relation[agent] = repo_payload[index][0]
            index += 1
        log.info(f"Agent and repo relation is : {self._agent_repo_relation}")
        return True

    @log_start()
    def _get_dws_storage_relation(self):
        """
        Derive the DWS node -> storage mapping by round-robining the cluster
        nodes over the usable agents' bound storages.
        :return: True on success
        """
        index = 0
        log.info(f"_cluster_nodes_hostname: {self._cluster_nodes_hostname}")
        ret, self._usable_agents = DistributeParent.check_roach_port(
            os.path.join(self._job_info.cache_path, 'tmp', self._job_info.copy_id, 'roach_client'),
            self._job_info.host_agents)
        if not ret:
            return False
        try:
            if not self._get_agent_repo_relation():
                return False
        except Exception as exception:
            log.error(f"Get relation failed, err: {process_string(exception.__str__(), SENSITIVE_WORDS)}")
            raise exception

        log.info(f"_usable_agents: {self._usable_agents}")
        for node in self._cluster_nodes_hostname:
            if index >= len(self._usable_agents):
                index = 0
            if not self._usable_agents[index] in self._agent_repo_relation.keys():
                log.error(f"Agent {self._usable_agents[index]} not configure bond storage.")
                return False
            self._dws_storage_relation[node] = self._agent_repo_relation.get(self._usable_agents[index])
            index += 1
        log.info(f"Agent dws relation: {self._dws_storage_relation}")
        return True

    def _get_storage_payload(self, relation):
        """
        Parse the source-side mapping string and count the load per storage.
        :param relation: mapping string, "agent@esn" items separated by ";"
        :return: dict of esn -> configured agent count
        :raises Exception: when a configuration item is malformed
        """
        storage_payload = dict()
        for item in relation.split(";"):
            temp_value = item.split("@")
            each_item_len = 2
            if len(temp_value) != each_item_len:
                raise Exception(f"The configuration {item} is invalid.")
            # Skip storages configured but not usable this time; their agents
            # will be re-assigned later
            if not self._is_in_usable_repo(temp_value[1]):
                log.info(f"Configured storage {temp_value[1]} not usable.")
                continue
            self._agent_repo_relation[temp_value[0]] = temp_value[1]
            if temp_value[1] in storage_payload.keys():
                storage_payload[temp_value[1]] += 1
            else:
                storage_payload[temp_value[1]] = 1

        return storage_payload

    def _is_in_usable_repo(self, esn: str):
        """
        Check whether the given storage esn is in the usable repo list.
        :return: True when found
        """
        for repo in self._usable_repo:
            try:
                temp_esn = repo[0]
            except Exception:
                log.error(f"Repo {repo} is illegal.")
                return False
            if esn == temp_esn:
                return True
        return False


# Distribution strategy used when source-side deduplication is disabled
class NonSourceDeleteStrategy(DistributeParent):
    """Distribution strategy when source-side deduplication is disabled:
    filesystems are assigned round-robin, least-loaded first."""

    @log_start()
    def _distribute_nodes(self, nodes_list, is_clear_db):
        """
        Completely re-assign the DWS node / filesystem relation.
        nodes_list: DWS nodes to assign
        is_clear_db: whether to clear the existing database first (True: clear)
        :return: True on success
        """
        log.info(f"Nodes_list: {nodes_list}, is clear db: {is_clear_db}")
        if not self._backup_dbfile():
            return False
        if is_clear_db and not self._clear_db():
            self._recover_dbfile()
            return False
        repo_payload = self._calculate_current_payload()
        if not repo_payload:
            log.error(f"Fail to calculate current payload.")
            return False
        fs_count = len(repo_payload) * 4
        ret, usable_agents = DistributeParent.check_roach_port(
            os.path.join(self._job_info.cache_path, 'tmp', self._job_info.copy_id, 'roach_client'),
            self._job_info.host_agents)
        if not ret or not usable_agents:
            log.error("No usable agents")
            return False
        agent_count = len(usable_agents)
        log.info(f"fs count: {fs_count}, agent count: {agent_count}")
        storage_reorder_flag = agent_count % fs_count == 0 or fs_count % agent_count == 0
        storage_index = 0
        for node_index, node in enumerate(nodes_list):
            if storage_reorder_flag and node_index > 0 and node_index % fs_count == 0:
                # When the agent count is a multiple or factor of the
                # filesystem count, rotate the storage order after each full
                # round so the same pairing does not repeat
                repo_payload.append(repo_payload.pop(0))
            if storage_index >= len(repo_payload):
                storage_index = 0
            # Repo tuple format: (storage ESN, fsId, fsName)
            single_fs = repo_payload[storage_index][1].sorted_fs_payload.pop(0)
            temp_fs_id = single_fs[0]
            temp_fs_name = repo_payload[storage_index][1].fs_info[temp_fs_id]
            temp_esn = repo_payload[storage_index][0]
            repo_payload[storage_index][1].sorted_fs_payload.append(single_fs)
            cmd = f"insert into {self._table_name} values(\"{node}\",\"{temp_fs_name}\"," \
                  f"\"{temp_fs_id}\",\"{temp_esn}\",\"\")"
            log.info(f"Insert values: {self._table_name}, {node}, {temp_fs_name}, {temp_fs_id}, {temp_esn}")
            ret, _ = self._execute_sqlite_cmd(cmd)
            if not ret:
                self._recover_dbfile()
                return False
            storage_index += 1
        temp_db_name = os.path.join(self._job_info.meta_path, "meta", "dwsHosts_bk.db")
        temp_db_name = os.path.realpath(temp_db_name)
        if os.path.isfile(temp_db_name):
            if not su_exec_rm_cmd(temp_db_name):
                log.warn(f"Fail to remove {temp_db_name}.")
        self._db_name = os.path.realpath(self._db_name)
        if not exec_lchown(self._db_name, "root", "rdadmin"):
            log.error(f"Change owner failed.")
            return False
        return True

    @log_start()
    def _update_dws_nodes(self):
        """
        Detect cluster scale-out: any current node absent from the last
        backup is added to the set waiting for distribution.
        """
        for node in self._cluster_nodes_hostname:
            if node not in self._last_host_list:
                self._wait_distribute_nodes.add(node)

    @log_start()
    def distribute_dws_nodes(self):
        """
        Assign a filesystem to every DWS node; return True/False.
        """
        # Run the common preparation steps
        if not self._do_common_part():
            return False
        # If the previous backup distributed with source delete, start over
        last_backup_info = DistributeParent.read_backup_info(self._job_info.meta_path)
        if last_backup_info.get("last_open_source_delete"):
            log.info("Last backup used source delete.")
            return self._distribute_nodes(self._cluster_nodes_hostname, True)
        # Storage units used by the previous backup
        last_storage_esn_list = list(set([repo[0] for repo in self._last_repo_list]))
        log.info(f"last_storage_esn_list: {last_storage_esn_list}")
        # Storage units used by this backup
        current_storage_esn_list = list(set([repo[0] for repo in self._usable_repo]))
        log.info(f"current_storage_esn_list: {current_storage_esn_list}")
        # Compare the filesystems used by the two backups
        if DistributeParent.is_same_list(self._usable_repo, self._last_repo_list):
            # Same as last time
            log.info("Repo not change.")
        elif DistributeParent.is_sub_list(self._usable_repo, self._last_repo_list):
            # Subset of last time: drop the vanished storages
            log.info("Repo shrink.")
            if not self._remove_unused_repo():
                return False
        elif DistributeParent.is_sub_list(self._last_repo_list, self._usable_repo) and \
                len(self._cluster_nodes_hostname) <= len(last_storage_esn_list):
            # Last backup's filesystems are a subset of this one's and the
            # cluster has no more nodes than last backup's storage devices:
            # no re-distribution needed
            log.info("No use all repo.")
        elif DistributeParent.is_sub_list(self._last_repo_list, self._usable_repo) and \
                DistributeParent.is_same_list(last_storage_esn_list, current_storage_esn_list):
            # Last backup's filesystems are a subset of this one's and the
            # storage devices are unchanged: no re-distribution needed
            log.info("Storage not change")
        else:
            # A filesystem not used last time appears: full re-assignment
            log.info("Exist different repo.")
            return self._distribute_nodes(self._cluster_nodes_hostname, True)
        # Handle cluster scale in/out
        self._update_dws_nodes()
        # Assign filesystems to the nodes waiting for distribution
        if not self._distribute_rest_dws_nodes():
            log.error("Fail to distribute rest nodes.")
            return False
        self.record_backup_info(self._job_info.meta_path, {"last_open_source_delete": False})
        return True

    def _distribute_rest_dws_nodes(self):
        """
        Assign filesystems to the nodes waiting for distribution.
        :return: True on success or when nothing is pending
        """
        if not self._wait_distribute_nodes:
            log.info("No rest node need distribute")
            return True
        return self._distribute_nodes(self._wait_distribute_nodes, False)