#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import json
import os
import time

from gaussdbt.commons.const import log, RoachConstant, JSON_SCHEMA_PATH
from gaussdbt.commons.gaussdbt_common import check_path_validity_upper, parse_backup_path
from common.common import check_command_injection, check_command_injection_exclude_quote
from common.parse_parafile import ParamFileUtil
from common.const import RepositoryDataTypeEnum


class ParamProtection:
    """Parse and validate the backup/restore job parameter file.

    The parameter file is located by ``pid`` and validated against the JSON
    schema at ``JSON_SCHEMA_PATH``.  Every getter defends against
    command-injection strings and illegal repository paths before returning
    a value, returning an empty value (``''``, ``{}``, ``0``, ``False``)
    rather than raising when validation fails.
    """

    def __init__(self, pid, step='', job_id='', db_user=''):
        """Load the job parameter file and validate caller-supplied identifiers.

        :param pid: identifier used to locate the job parameter file
        :param step: current job step (informational only)
        :param job_id: job identifier; rejected if it contains injection
            characters (quotes excluded from the check)
        :param db_user: database user name; rejected on injection characters
        :raises Exception: when ``job_id``/``db_user`` fail validation or the
            parameter file cannot be parsed
        """
        self._pid = pid
        self._step = step
        # Populated lazily by callers that cache roach metadata.
        self.meta_object = None
        if check_command_injection_exclude_quote(job_id):
            log.error("The job_id verification fails.")
            raise Exception("The job_id verification fails.")
        self.job_id = job_id
        if check_command_injection(db_user):
            log.error("The db_user verification fails.")
            raise Exception("The db_user verification fails.")
        self.db_user = db_user
        if not self._read_param_from_file():
            raise Exception("Parse job param failed")

    @staticmethod
    def check_path_invaild(reps):
        """Return True when the repository entry's first path is legal.

        NOTE: the method name keeps the historical "invaild" spelling for
        caller compatibility.  Despite the name, True means the path is VALID.
        """
        if not reps.get("path"):
            # No path present: nothing to validate.
            return True
        repo_type = reps.get("repositoryType")
        path = reps.get("path")[0]
        if repo_type == RepositoryDataTypeEnum.META_REPOSITORY:
            if not check_path_validity_upper(path):
                log.error(f'The meta_repository: {path} illegal.')
                return False
        if repo_type == RepositoryDataTypeEnum.DATA_REPOSITORY:
            if not check_path_validity_upper(path):
                log.error(f'The data_repository: {path} illegal.')
                return False
        if repo_type == RepositoryDataTypeEnum.CACHE_REPOSITORY:
            # Cache repositories are validated one directory level up,
            # after resolving symlinks.
            upper_path = os.path.dirname(os.path.realpath(path))
            if not check_path_validity_upper(upper_path):
                log.error(f'The cache_repository: {upper_path} illegal.')
                return False
        return True

    @classmethod
    def get_backup_key(cls, cache_path, job_id):
        """Extract the backup key from the job's cache file.

        Scans ``<cache_path>/<job_id>`` for a line containing "backup key"
        of the form ``..., <label>:<key>`` and returns the key, or '' when
        the file is missing, unreadable, or the key fails injection checks.
        """
        backup_key = ''
        file_path = os.path.join(cache_path, job_id)
        if not os.path.exists(file_path):
            log.error(f"File: {file_path} not exist, can not get backup_key")
            return backup_key
        lines = []
        try:
            with open(file_path, "r", encoding='UTF-8') as file:
                lines = file.readlines()
        except IOError:
            log.error(f'Open file:{file_path} failed.')
        for line in lines:
            if "backup key" in line:
                # Expected line shape: "<prefix>, <label>:<key>"
                backup_key = line.split(',')[1].split(":")[1].strip()
                break
        if check_command_injection(backup_key):
            log.error("The parameter verification fails.")
            return ''
        return backup_key

    @classmethod
    def get_full_backup_key(cls, file_path):
        """Return the key of the last full copy, or '' on failure."""
        file_name = os.path.join(file_path, "LastFullCopyKey")
        return cls.read_key(file_name)

    @classmethod
    def get_latest_backup_key(cls, file_path):
        """Return the key of the most recent backup, or '' on failure."""
        file_name = os.path.join(file_path, "meta", "LastBackupKey")
        return cls.read_key(file_name)

    @classmethod
    def read_key(cls, file_path):
        """Read a key file and return its content, or '' on I/O error or
        when the content fails the injection check."""
        try:
            with open(file_path, "r") as file:
                key = file.read()
        except IOError:
            log.error(f'Open file:{file_path} failed.')
            return ''
        if check_command_injection(key):
            log.error("The parameter verification fails.")
            return ''
        return key

    @classmethod
    def latest_backup_key(cls, file_path):
        """Return the newest BackupKey recorded in the roach meta file.

        Validates that the number of recorded backup details matches
        ``BackupCount``; returns '' on any missing file, parse error,
        inconsistency, or injection-check failure.
        """
        if not file_path:
            log.error("Roach file path is null.")
            return ''
        file_path = os.path.join(file_path, RoachConstant.ROACH_META_FILE_NAME)
        if not os.path.exists(file_path):
            # Fixed message typo ("is not exit" -> "does not exist").
            log.error("Roach file does not exist")
            return ''
        try:
            with open(file_path, 'r', encoding='utf-8') as json_object:
                meta_object = json.loads(json_object.read())
        except IOError:
            log.error(f"Open roach meta file: {file_path} failed ")
            return ''
        if not meta_object or not meta_object.get("BackupCount", {}) or not meta_object.get("BackupDetails", {}):
            log.error("Roach meta info is none.")
            return ''
        max_size = meta_object.get("BackupCount")
        backup_meta_list = meta_object.get("BackupDetails")
        if len(backup_meta_list) != max_size:
            log.error("Copy meta info has damage.")
            return ''
        latest_key = backup_meta_list[max_size - 1].get("BackupKey")
        if check_command_injection(latest_key):
            log.error("The parameter verification fails.")
            return ''
        return latest_key

    def get_backup_type(self):
        """Return the backup type parsed from jobParam, or '' when absent."""
        job_info = self._get_job_info()
        if not job_info or "jobParam" not in job_info:
            log.error("Failed to get jobParam in job_info.")
            return ""
        return ParamFileUtil.parse_backup_type(job_info.get("jobParam"))

    def get_parallel_process(self):
        """Return the configured parallel process count.

        Returns:
            int: the configured value, or 0 when missing/out of range.
            None: when the SLA explicitly stored the string "null"
                (produced by automation scripts).
        """
        job_info = self._get_job_info()
        if not job_info:
            log.error("Failed to get jobParam in job_info.")
            return 0
        if job_info.get("extendInfo", {}):
            parallel_process = job_info.get("extendInfo", {}).get("parallel_process")
            # SLAs created by automation scripts may store the literal
            # string "null"; treat that as "not configured".
            if parallel_process == "null":
                return None
            if parallel_process:
                parallel_process = int(parallel_process)
                if parallel_process > RoachConstant.MAX_PARALLEL_PROCESS or \
                        parallel_process < RoachConstant.MIN_PARALLEL_PROCESS:
                    return 0
            return parallel_process
        return 0

    def get_backup_standby(self):
        """Return True when the job prefers backing up from a slave node."""
        job_info = self._get_job_info()
        if not job_info:
            log.error("Failed to get jobParam in job_info.")
            return False
        if job_info.get("extendInfo", {}):
            # Value is a JSON string flag, not a boolean.
            if job_info.get("extendInfo", {}).get("slave_node_first") == "true":
                return True
        return False

    def get_job_repository_list(self):
        """Return the parsed repository paths of the job's first copy,
        or an empty dict when the job or copy info is missing."""
        job_info = self._get_job_info()
        if not job_info:
            return dict()
        if not job_info.get("copies"):
            log.error("Get job copy info failed")
            return dict()
        return ParamFileUtil.parse_backup_path(job_info.get("copies")[0].get("repositories", {}))

    def get_repository_list(self):
        """Return validated repository paths keyed by repository kind.

        Invalid paths are replaced with '' in place rather than dropped, so
        callers can distinguish "missing" from "rejected".
        """
        job_info = self._get_job_info()
        backup_dirs = dict()
        if not job_info or "repositories" not in job_info:
            log.error("Failed to get repositories in job_info.")
            return backup_dirs
        repositories = job_info.get("repositories")
        # Fixed guard: was `and`, which let empty lists and truthy
        # non-list values slip past the check.
        if not repositories or not isinstance(repositories, list):
            log.error('Repositories is empty.')
            return backup_dirs
        backup_dirs = parse_backup_path(repositories)
        if 'log_repository' in backup_dirs.keys():
            self.check_repository_path_invalid(backup_dirs)
        else:
            for repository in backup_dirs:
                check_path = backup_dirs.get(repository, [""])[0]
                if 'cache' in repository:
                    # Cache paths are validated one level up.
                    check_path = os.path.dirname(check_path)
                if not check_path_validity_upper(check_path):
                    log.error(f"The {repository}: {check_path} illegal.")
                    backup_dirs.get(repository)[0] = ''
        return backup_dirs

    def check_repository_path_invalid(self, backup_dirs):
        """Validate log/meta/data/cache repository paths in ``backup_dirs``,
        blanking any entry that fails validation (mutates in place).

        The log repository path is first rewritten to ``<parent>/data``.
        """
        check_path = backup_dirs.get("log_repository", [""])[0]
        check_path = os.path.join(os.path.dirname(check_path), "data")
        backup_dirs.get("log_repository")[0] = check_path
        if not check_path_validity_upper(os.path.dirname(check_path)):
            log.error(f"The log_repository: {check_path} illegal.")
            backup_dirs.get("log_repository")[0] = ''
        check_path = self.get_meta_repos_path(backup_dirs.get("meta_repository", [""]))
        if not check_path_validity_upper(os.path.dirname(check_path)):
            log.error(f"The meta_repository: {check_path} illegal.")
            backup_dirs.get("meta_repository")[0] = ''
        check_path = backup_dirs.get("data_repository", [""])[0]
        if not check_path_validity_upper(check_path):
            log.error(f"The data_repository: {check_path} illegal.")
            backup_dirs.get("data_repository")[0] = ''
        check_path = os.path.dirname(backup_dirs.get("cache_repository", [""])[0])
        if not check_path_validity_upper(check_path):
            log.error(f"The cache_repository: {check_path} illegal.")
            backup_dirs.get("cache_repository")[0] = ''

    def safe_get_all_repositories(self):
        """Return the raw repositories list after validating every
        remotePath, or {} when any path is missing or illegal."""
        job_info = self._get_job_info()
        if not job_info or "repositories" not in job_info:
            log.error("Failed to get repositories in job_info.")
            return {}
        repositories = job_info.get("repositories")
        # Fixed guard: was `and` (see get_repository_list).
        if not repositories or not isinstance(repositories, list):
            log.error('Repositories is empty.')
            return {}
        for repository in repositories:
            check_path = repository.get("remotePath")
            if not check_path_validity_upper(os.path.dirname(check_path)):
                log.error(f"The remotePath: {check_path} illegal.")
                return {}
        return repositories

    def get_copy_info(self):
        """Return the job's copies list after validating each copy's
        repository paths and backup key.

        Returns '' (not an empty list) on validation failure — kept for
        backward compatibility with existing falsy-checks in callers.
        """
        copy_info = self._get_job_info().get("copies", [{}])
        for copy_item in copy_info:
            repositories_info = ParamFileUtil.parse_backup_path(copy_item.get("repositories", {}))
            data_repository_path = repositories_info.get("data_repository", [""])[0]
            cache_repository_path = repositories_info.get("cache_repository", [""])[0]
            copy_extend_info = copy_item.get("extendInfo", {})
            backup_key = copy_extend_info.get("backupIndexId", "")
            if not check_path_validity_upper(os.path.dirname(os.path.realpath(cache_repository_path))):
                log.error(f"The cache_repository: {cache_repository_path} illegal.")
                return ''
            if check_command_injection(f'{data_repository_path} {backup_key}'):
                log.error("The parameter verification fails.")
                return ''
        return copy_info

    def get_job_extend_info(self):
        """Return the job's extendInfo dict (possibly empty)."""
        return self._get_job_info().get("extendInfo", {})

    def get_copy_type(self):
        """Return the type of the copy relevant to this job."""
        copy_dict = self._select_restore_copy()
        return copy_dict.get("type")

    def get_restore_key(self):
        """Return the backupIndexId of the copy to restore, or '' when it
        fails the injection check.

        Archive copies (s3Archive/tapeArchive) nest the id one extendInfo
        level deeper.
        """
        copy_dict = self._select_restore_copy()
        backup_key = copy_dict.get("extendInfo", {}).get("backupIndexId")
        copy_type = copy_dict.get("type")
        if copy_type == "s3Archive" or copy_type == "tapeArchive":
            extend_dict = copy_dict.get("extendInfo", {})
            backup_key = extend_dict.get("extendInfo", {}).get("backupIndexId")
        if check_command_injection(backup_key):
            log.error("The parameter verification fails.")
            return ''
        return backup_key

    def get_restore_time_stamp(self):
        """Return the point-in-time restore timestamp formatted as
        'YYYY-MM-DD HH:MM:SS' (local time), or '' when not applicable."""
        job_dict = self._get_job_info()
        time_stamp = ''
        if not job_dict.get("extendInfo", {}):
            log.error('Failed to get extendInfo.')
            return time_stamp
        if job_dict.get("extendInfo", {}).get("restoreTimestamp") and len(job_dict.get("copies", [{}])) > 1:
            time_stamp = job_dict.get("extendInfo", {}).get("restoreTimestamp")
            time_stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(time_stamp)))
        if check_command_injection(time_stamp):
            log.error("The parameter verification fails.")
            return ''
        return time_stamp

    def get_data_copy_dict(self):
        """Return the data copy dict for this job after validating its
        repository paths; {} when extendInfo is missing or a path is bad."""
        copy_dict = dict()
        job_dict = self._get_job_info()
        if not job_dict.get("extendInfo", {}):
            log.error('ExtendInfo is empty.')
            return copy_dict
        copy_dict = self._select_restore_copy()
        repositories_info = copy_dict.get("repositories", [])
        for reps in repositories_info:
            if not ParamProtection.check_path_invaild(reps):
                return dict()
        return copy_dict

    def get_log_path(self):
        """Return the log copy's data directory for point-in-time restore,
        or "" when there is no log copy or the path is illegal."""
        job_dict = self._get_job_info()
        if not job_dict.get("extendInfo", {}).get("restoreTimestamp", "") or len(job_dict.get("copies", [{}])) <= 1:
            log.warning("Log copy info is empty")
            return ""
        # The log copy is always the last one in point-in-time restores.
        copy_dict = job_dict.get("copies", [{}])[-1]
        repositories_info = copy_dict.get("repositories", [])
        for reps in repositories_info:
            if reps.get("repositoryType") == RepositoryDataTypeEnum.LOG_REPOSITORY and reps.get("path"):
                path = reps.get("path")[0]
                if not check_path_validity_upper(os.path.dirname(path)):
                    # Fixed message: this branch validates the LOG repository.
                    log.error(f'The log_repository: {path} illegal.')
                    return ""
                return os.path.join(os.path.dirname(path), "data")
        return ""

    def get_subjob(self):
        """Return the subJob section of the parameter file."""
        return self._body_param.get("subJob", {})

    def get_new_cluster(self):
        """Return the restore target location, or None when absent."""
        return self._get_job_info().get("extendInfo", {}).get("targetLocation")

    def get_restore_type(self):
        """Return the restore type, or '' when absent."""
        return self._get_job_info().get("jobParam", {}).get("restoreType", "")

    def get_copy(self):
        """Return the job's copy section (possibly empty)."""
        return self._get_job_info().get("copy", {})

    def get_job(self):
        """Return the job section of the parameter file."""
        return self._get_job_info()

    def get_meta_repos_path(self, path_list):
        """Return the first meta-repository path that is not a log path,
        or "" when every entry contains 'log'."""
        log.info(f"Getting meta repos path : {self.job_id}")
        for item in path_list:
            if "log" not in item:
                return item
        return ""

    def _select_restore_copy(self):
        """Pick the copy relevant to the current restore.

        Point-in-time restores (restoreTimestamp set, more than one copy)
        use the second-to-last copy; plain restores use the last one.
        """
        job_dict = self._get_job_info()
        if job_dict.get("extendInfo", {}).get("restoreTimestamp") and len(job_dict.get("copies", [{}])) > 1:
            return job_dict.get("copies", [{}])[-2]
        return job_dict.get("copies", [{}])[-1]

    def _read_param_from_file(self):
        """Locate the parameter file via pid, parse it, and validate it
        against the JSON schema.  Returns True on success."""
        log.info("Start to get param file.")
        try:
            self._body_param = ParamFileUtil.parse_param_file_and_valid_by_schema(self._pid, JSON_SCHEMA_PATH)
        except Exception as err:
            log.error(f"Failed to parse job param file for {err}")
            return False
        if not self._body_param:
            log.error("Failed to parse job param file is none")
            return False
        return True

    def _get_job_info(self):
        """Return the 'job' section of the parameter file, or {} when it
        is absent or empty."""
        if "job" not in self._body_param or not self._body_param.get("job"):
            log.error("Param file not job info")
            return {}
        return self._body_param.get("job")