#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import json
import os

from common.common import execute_cmd, check_command_injection
from common.common_models import SubJobDetails, LogDetail
from common.const import RepositoryDataTypeEnum, CopyDataTypeEnum, RestoreTypeEnum, DBLogLevel, AuthType
from common.parse_parafile import get_env_variable
from sqlserver import log
from sqlserver.commons.common import output_execution_result_ex
from sqlserver.commons.const import SqlServerReportLabel, BodyErr, SQLServerStrConstant, \
    SQLServerProgressFileType, ParamConstant, SQLServerJsonConstant
from sqlserver.commons.sqlserver_utils import SqlServerUtils


def get_livemount_path(job_id, path):
    """
    E6000 adaptation: build the livemount path used as the file-system
    mount point during restore. Falls back to the original path when the
    job id is missing or the livemount directory does not exist.
    """
    if not job_id:
        return path
    candidate = os.path.join(path, "livemount", job_id)
    return candidate if os.path.exists(candidate) else path


class SQLServerRestoreBase:
    def __init__(self, p_id, job_id, sub_job_id, json_param):
        """
        Base state for a SQL Server restore task.

        :param p_id: framework process/request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        :param json_param: parsed job parameter dict delivered by the framework
        """
        self._job_id = job_id
        self._sub_job_id = sub_job_id
        self._p_id = p_id
        self._json_param = json_param
        # Restore type; refined later by get_copy_info() from the copy list.
        self.restore_type = RestoreTypeEnum.FULL_RESTORE.value
        # Mount paths of the full copy ("dataPath"/"metaPath").
        self.full_copy = dict()
        # Mount paths of the differential copy ("dataPath"/"metaPath").
        self.diff_copy = dict()
        # Mount paths of the log copies.
        self.log_copy = {"dataPath": [], "metaPath": []}
        self.cache_path = ""
        # Backup end time of the full/diff copy (used to filter log copies).
        self.data_time = ""
        self.user_info = ""  # authentication info
        self.vdi_info = ""  # VDI authentication info
        self.pass_prefix = ""
        self.sql_utils = None
        self.auth_mode = None
        self.user_name = ""
        # Pre-built log details reported to the framework on failure/success/progress.
        self.restore_fail_tail = LogDetail(logInfo=SqlServerReportLabel.SUB_JOB_FAILED_LABEL,
                                           logDetail=BodyErr.ERROR_INTERNAL.value,
                                           logInfoParam=[self._sub_job_id], logLevel=DBLogLevel.ERROR.value)
        self.restore_success_tail = LogDetail(logInfo=SqlServerReportLabel.RESTORE_SUB_JOB_SUCCESS_LABEL,
                                              logInfoParam=[self._sub_job_id], logLevel=DBLogLevel.INFO.value)
        self.running_tail = LogDetail(logInfo=SqlServerReportLabel.RESTORE_SUB_JOB_RUNNING_LABEL,
                                      logInfoParam=[self._sub_job_id], logLevel=DBLogLevel.INFO.value)
        self.channel_number = SQLServerStrConstant.DEFAULT_CHANNEL
        self.new_restore_path = ""

    @staticmethod
    def get_meta_info(file_path):
        path = os.path.join(file_path, "meta.info")
        try:
            with open(path, 'r') as f:
                meta_info = json.loads(f.read())
        except Exception as err:
            log.exception("Exception when read meta info")
            raise Exception("Failed to read meta info") from err
        return meta_info

    @staticmethod
    def read_temp_file(file_path):
        """
        解析临时文件
        :return:
        """
        if not os.path.isfile(file_path):
            raise Exception(f"File:{file_path} not exist")
        try:
            with open(file_path, "r", encoding='UTF-8') as f:
                json_dict = json.loads(f.read())
        except Exception as e:
            raise Exception("parse param file failed") from e
        return json_dict

    @staticmethod
    def filter_list_handle(pre_dir, file_content, start_time, end_time):
        ret_list = []
        line_list = file_content.split('\n')
        for line_mbr in line_list:
            list_by_semicolon = line_mbr.split(';')
            if len(list_by_semicolon) < 2:
                continue
            list_by_wavy_line = list_by_semicolon[1].split('~')
            if len(list_by_wavy_line) != 2:
                continue
            if int(list_by_wavy_line[1]) < int(start_time):
                continue
            # 等于开始时间并且小于结束时间需要进行恢复
            if int(list_by_wavy_line[1]) < int(end_time):
                ret_list.append(os.path.join(pre_dir, list_by_semicolon[0]))
            # 只取第一次大于或等于结束时间的副本
            if int(list_by_wavy_line[1]) >= int(end_time):
                ret_list.append(os.path.join(pre_dir, list_by_semicolon[0]))
                break
        return True, ret_list

    def get_hostname(self):
        """
        Return the restore target host name.

        Prefers the networkName configured on the job's target object;
        otherwise falls back to the local ``hostname`` command and returns
        an empty string if the command fails.
        """
        target = self._json_param.get("job", {}).get("targetObject", {})
        configured = target.get("extendInfo", {}).get("networkName", "")
        if configured:
            return configured
        ret, std_out, std_err = execute_cmd('hostname')
        return "" if int(ret) != 0 else std_out.strip()

    def filter_list(self, original_log_list, start_time):
        time_stamp = self._json_param.get('job', {}).get('extendInfo', {}).get('restoreTimestamp')
        # 全量副本id
        file_id = self._json_param.get('job', {}).get('extendInfo', {}).get('restoreCopyId')
        associated_log_copies = self._json_param.get('job', {}).get('extendInfo', {}).get('associated_log_copies')
        if not original_log_list:
            return True, []
        try:
            pre_dir_name = os.path.dirname(original_log_list[0])
        except Exception as e_info:
            log.error(f"Failed to obtain the upper-level directory. Msg: {e_info}")
            return False, []
        # start_time：全量或差异副本的备份时间，time_stamp：恢复到的时间
        if associated_log_copies:
            log_meta_dict = [
                {
                    "copy_id": copy_id,
                    "start_stamp": timestamps.split('~')[0],
                    "end_stamp": timestamps.split('~')[1]
                }
                for copy_id, timestamps in associated_log_copies.items()
            ]
            # UBC下发的associated_log_copies是倒序，按照时间排序
            log_meta_dict.sort(key=lambda x: x.get("start_stamp"))
            res, ret_list = self.filter_associate_log_copies(log_meta_dict, pre_dir_name, start_time, time_stamp)
            return res, ret_list
        meta_file_name = os.path.join(pre_dir_name, f'{file_id}.meta')
        if not os.path.isfile(meta_file_name):
            log.error("The log copy timestamp file does not exist.")
            return False, []
        try:
            with open(meta_file_name, "r", encoding='UTF-8') as f_info:
                # start_time：全量或差异副本的备份时间，time_stamp：恢复到的时间
                res, ret_list = self.filter_list_handle(pre_dir_name, f_info.read(), start_time, time_stamp)
                return res, ret_list
        except Exception as e_info:
            log.error(f"Failed to read the timestamp file of the log copy. Msg: {e_info}")
            return False, []

    @staticmethod
    def filter_associate_log_copies(log_meta_dict, pre_dir, start_time, end_time):
        ret_list = []
        for log_copy in log_meta_dict:
            if int(log_copy.get("end_stamp")) < int(start_time):
                continue
            # 等于开始时间并且小于结束时间需要进行恢复
            if int(log_copy.get("end_stamp")) < int(end_time):
                ret_list.append(os.path.join(pre_dir, log_copy.get("copy_id")))
            # 只取第一次大于或等于结束时间的副本
            if int(int(log_copy.get("end_stamp"))) >= int(end_time):
                ret_list.append(os.path.join(pre_dir, log_copy.get("copy_id")))
                break
        return True, ret_list

    def get_full_copy_info(self, copy):
        """
        Record the mount paths of a full copy: data/meta paths go into
        ``self.full_copy``, the cache repository path into ``self.cache_path``.
        Also captures the copy's backup end time.
        """
        self.data_time = copy.get("extendInfo", {}).get("endTime")
        data_type = RepositoryDataTypeEnum.DATA_REPOSITORY.value
        meta_type = RepositoryDataTypeEnum.META_REPOSITORY.value
        cache_type = RepositoryDataTypeEnum.CACHE_REPOSITORY.value
        for repository in copy.get("repositories", []):
            repo_type = repository.get("repositoryType")
            if repo_type not in (data_type, meta_type, cache_type):
                continue
            mount = get_livemount_path(self._job_id, repository.get("path", [""])[0])
            if repo_type == data_type:
                self.full_copy["dataPath"] = mount
            elif repo_type == meta_type:
                self.full_copy["metaPath"] = mount
            else:
                self.cache_path = mount

    def get_diff_copy_info(self, copy, full_path):
        """
        Record the mount paths of a differential copy and, for replicated
        diff copies where only the diff copy is mounted, derive the
        full-copy paths from the sibling directory named full_path.

        :param copy: copy dict from the job parameters
        :param full_path: full copy directory name taken from the full
            copy's extendInfo
        :raises Exception: when neither a mounted full copy nor full_path
            is available
        """
        # If the full copy path was not obtained and extendInfo carries no
        # dataPath, fail immediately.
        if not self.full_copy.get("dataPath", "") and not full_path:
            log.error("Full path is empty")
            raise Exception("Full path is empty")
        self.data_time = copy.get("extendInfo", {}).get("endTime")
        for repository in copy.get("repositories", []):
            if repository.get("repositoryType") == RepositoryDataTypeEnum.DATA_REPOSITORY.value:
                self.diff_copy["dataPath"] = get_livemount_path(self._job_id, repository.get("path", [""])[0])
            elif repository.get("repositoryType") == RepositoryDataTypeEnum.META_REPOSITORY.value:
                self.diff_copy["metaPath"] = get_livemount_path(self._job_id, repository.get("path", [""])[0])
            elif repository.get("repositoryType") == RepositoryDataTypeEnum.CACHE_REPOSITORY.value:
                self.cache_path = get_livemount_path(self._job_id, repository.get("path", [""])[0])
        # Replicated diff-copy restore mounts only the diff copy; the full
        # copy paths must be assembled from the directory next to it.
        if not self.full_copy.get("dataPath", ""):
            dir_path = os.path.dirname(self.diff_copy.get("dataPath"))
            if dir_path.endswith(":"):
                # A bare drive letter needs a trailing backslash on Windows.
                dir_path += "\\"
            if full_path in os.listdir(dir_path):
                # VDI does not support realpath, so join the path manually.
                self.full_copy["dataPath"] = os.path.join(dir_path, full_path)
        if not self.full_copy.get("metaPath", ""):
            dir_path = os.path.dirname(self.diff_copy.get("metaPath"))
            if dir_path.endswith(":"):
                dir_path += "\\"
            if full_path in os.listdir(dir_path):
                self.full_copy["metaPath"] = os.path.join(dir_path, full_path)

    def get_log_copy_info(self, copy):
        """Append each log repository's mount path to ``self.log_copy["dataPath"]``."""
        log_repo_type = RepositoryDataTypeEnum.LOG_REPOSITORY.value
        for repository in copy.get("repositories", []):
            if repository.get("repositoryType") != log_repo_type:
                continue
            mounted = get_livemount_path(self._job_id, repository.get("path", [""])[0])
            self.log_copy["dataPath"].append(mounted)

    def get_copy_info(self):
        """
        Parse all copies in the job parameters, classify them by type,
        record their repository paths and determine the restore type.
        Log copies are then trimmed to the restore time window.

        :raises Exception: when no copies exist or log-copy filtering fails
        """
        copies = self._json_param.get("job", {}).get("copies", [])
        if not copies:
            raise Exception("Failed to get copies")
        full_path = ""
        # Bug fix: compare against the enum *values*. The JSON "type" field is a
        # plain string (see get_extend_info), so the bare S3_ARCHIVE/TAP_ARCHIVE
        # enum members could never match it.
        full_like_types = [CopyDataTypeEnum.FULL_COPY.value, CopyDataTypeEnum.S3_ARCHIVE.value,
                           CopyDataTypeEnum.TAP_ARCHIVE.value]
        for copy in copies:
            if copy.get("type") in full_like_types:
                self.restore_type = RestoreTypeEnum.FULL_RESTORE.value
                self.get_full_copy_info(copy)
                full_path = copy.get("extendInfo", {}).get("dataPath", "")
            elif copy.get("type") == CopyDataTypeEnum.DIFF_COPY.value:
                self.restore_type = RestoreTypeEnum.DIFF_RESTORE.value
                self.get_diff_copy_info(copy, full_path)
            elif copy.get("type") == CopyDataTypeEnum.LOG_COPY.value:
                self.restore_type = RestoreTypeEnum.LOG_RESTORE.value
                self.get_log_copy_info(copy)
        if self.log_copy.get("dataPath"):
            # Trim the log chain to [data_time, restoreTimestamp].
            flag, self.log_copy["dataPath"] = self.filter_list(self.log_copy.get("dataPath"), self.data_time)
            if not flag or not self.log_copy.get("dataPath", []):
                log.error("Failed to get filter log copy")
                raise Exception("Failed to get filter log copy")
            self.log_copy["metaPath"] = self.log_copy.get("dataPath")

    def get_extend_info(self):
        """
        Return the extendInfo of the first full/archive copy in the job.

        Archive copies (s3Archive/tapeArchive) nest the original extendInfo
        one level deeper.

        :raises Exception: when no copies or no matching copy is found
        """
        copies = self._json_param.get("job", {}).get("copies")
        if not copies:
            raise Exception("Failed to get copies")
        for copy in copies:
            copy_type = copy.get("type")
            if copy_type == "full":
                return copy.get("extendInfo")
            if copy_type in ("s3Archive", "tapeArchive"):
                return copy.get("extendInfo", {}).get("extendInfo")
        raise Exception("Failed to get copies")

    def get_cache_path(self):
        """
        Obtain the cache repository path from the job parameters.

        Stops at the first cache repository found (adapts replication
        restore, where several copies may each carry a cache repository).
        """
        copies = self._json_param.get('job', {}).get('copies', [])
        for copy in copies:
            for repository in copy.get(SQLServerJsonConstant.REPOSITORIES, []):
                repo_type = repository.get(SQLServerJsonConstant.REPOSITORY_TYPE, "")
                if repo_type == RepositoryDataTypeEnum.CACHE_REPOSITORY.value:
                    self.cache_path = repository.get(SQLServerJsonConstant.PATH, [""])[0]
                if self.cache_path:
                    # Bug fix: stop scanning entirely once a cache path is
                    # known. The original "break" only left the inner loop,
                    # so a later copy's cache repository overwrote the first
                    # one, contrary to the stated intent.
                    return

    def write_progress_to_file(self, task_status, progress, log_detail, progress_type):
        """
        Persist sub-job progress into a file under the cache repository.

        :param task_status: task status code to report
        :param progress: progress percentage (coerced to int)
        :param log_detail: optional LogDetail appended to the record
        :param progress_type: progress file type; COMMON files get the
            sub-job id appended to their name
        """
        if not self.cache_path:
            self.get_cache_path()
        detail = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=int(progress),
                               logDetail=list(), taskStatus=task_status)
        if log_detail:
            detail.log_detail.append(log_detail)
        suffix = self._sub_job_id if progress_type == SQLServerProgressFileType.COMMON else ""
        file_path = os.path.join(self.cache_path, progress_type + suffix)
        log.info(f"Write file.path: {file_path} progress: {progress}, task_status: {task_status}, "
                 f"pid: {self._p_id} taskId: {self._job_id} subTaskId: {self._sub_job_id}")
        output_execution_result_ex(file_path, detail.dict(by_alias=True))

    def report_progress_comm(self, file_name: str = None):
        """
        Copy a progress file from the cache repository into the framework
        result file for this pid.

        :param file_name: progress file name; defaults to the common
            progress file of the current sub job
        :return: True on success, False otherwise
        """
        self.get_cache_path()
        if not self.cache_path:
            log.error(f"Get cache path failed, pid: {self._p_id} jobId: {self._job_id}")
            return False
        # Default to the common progress file of this sub job.
        progress_file = file_name if file_name else SQLServerProgressFileType.COMMON + self._sub_job_id
        progress_path = os.path.join(self.cache_path, progress_file)
        try:
            progress_dict = self.read_temp_file(progress_path)
        except Exception as err:
            log.error(f"Failed to read progress file as err: {err}")
            return False
        result_path = os.path.join(ParamConstant.RESULT_PATH, f"result{self._p_id}")
        log.info(f"Write file path: {result_path} progress: {progress_dict.get('progress')}, taskStatus: "
                 f"{progress_dict.get('taskStatus')},  pid: {self._p_id} jobId: {self._job_id}")
        output_execution_result_ex(result_path, progress_dict)
        return True

    def check_and_prepare_user_info(self, instance_name):
        """
        Validate the instance name, build the sqlcmd/VDI authentication
        argument strings and create the SqlServerUtils helper.

        :param instance_name: SQL Server instance (service) name
        :raises Exception: on invalid/injected parameters or an
            unsupported auth mode
        """
        # Reject empty names and names containing shell-injection characters.
        if not instance_name or check_command_injection(instance_name):
            log.error("Failed to get instance name or the param has special characters.")
            raise Exception("Failed to get instance name")
        localhost = self.get_hostname()
        if instance_name.upper() == SQLServerStrConstant.SQLSERVER_DEFAULT_SERVICES.upper():
            # Default instance: connect with the bare host name.
            instance = localhost
        else:
            # Named instance: quoted "host\\instance" for the command line.
            instance = f'\"{localhost}\\\\{instance_name}\"'
        if self.auth_mode == str(AuthType.APP_PASSWORD.value):
            # SQL Server authentication: requires user name and password.
            if not self.user_name and not get_env_variable(self.pass_prefix):
                log.error("Failed to get user info or user pass")
                raise Exception("Failed to get user info or user pass")
            if check_command_injection(self.user_name):
                log.error("The param has special characters.")
                raise Exception("The param has special characters.")
            self.user_info = f"-S {instance} -U {self.user_name}"
            self.vdi_info = self.user_info
        elif self.auth_mode == str(AuthType.OS_PASSWORD.value):
            # Windows (trusted) authentication: -E for the VDI connection.
            self.user_info = f"-S {instance}"
            self.vdi_info = f"-S {instance} -E"
        else:
            log.error(f"Auth mode: {self.auth_mode} is not support")
            raise Exception("Auth mode not support")
        self.sql_utils = SqlServerUtils(self.auth_mode, self.user_info, self.pass_prefix, self._job_id)

    def check_file_exist(self, file_path, file_list):
        meta_info = self.get_meta_info(file_path)
        for value in meta_info.values():
            for info in value:
                if info.get("physical_name") in file_list:
                    self.restore_fail_tail.log_detail = BodyErr.RESTORE_FAIL_FOR_DATABASE_FILE_EXIST.value
                    log.error(f"File: {info.get('physical_name')} exist")
                    raise Exception("File exist")

    def check_same_filename(self, new_path):
        if not os.path.exists(new_path):
            return True
        target_file_list = os.listdir(new_path)
        # 检查全量副本文件是否存在
        self.check_file_exist(self.full_copy.get("metaPath"), target_file_list)
        # 检查差异副本文件是否存在
        if self.diff_copy:
            self.check_file_exist(self.diff_copy.get("metaPath"), target_file_list)
        # 检查日志副本文件是否存在
        for copy in self.log_copy.get("dataPath"):
            self.check_file_exist(copy, target_file_list)
        return True

    def get_database_restore_info(self, database, new_path, meta_path, new_data_name):
        """
        Build the MOVE clauses for restoring a database to a new location.

        :param database: source database name
        :param new_path: target directory (created when missing); an empty
            value means restore in place, and an empty string is returned
        :param meta_path: directory holding the copy's meta.info
        :param new_data_name: target database name; when it differs from the
            source, files are renamed "<new_data_name>_<index><ext>"
        :return: comma-prefixed string of "move '<logical>' to '<physical>'"
        """
        if not new_path:
            return ""
        if not os.path.exists(new_path):
            os.makedirs(new_path)
        # New-location restore: prepare the database file information.
        meta_info = self.get_meta_info(meta_path)
        clauses = ""
        for index, info in enumerate(meta_info.get(database)):
            if database == new_data_name:
                # Same database name: keep the original file name.
                target_file = os.path.join(new_path, info.get('physical_name'))
            else:
                log.info(f"origin physical_name:{info.get('physical_name')}")
                # Renamed database: keep only the extension of the source file.
                file_suffix = os.path.splitext(info.get('physical_name'))
                target_file = os.path.join(new_path, f"{new_data_name}_{index}{file_suffix[1]}")
                log.info(f"physical_name:{target_file}")
            clauses = "{},{}".format(clauses, f"move '{info.get('name')}' to '{target_file}'")
        log.info(f"restore_info:{clauses}")
        return clauses

    def get_restore_new_path(self):
        """
        Resolve the target directory for a new-location restore.

        Precedence: job extendInfo.newDatabasePath, then the nested
        extendInfo.extendInfo.newDatabasePath, then the instance default
        data path queried from SQL Server itself.

        :raises Exception: when the resolved path ends with ":" (a bare
            drive letter), which is not a valid restore directory
        """
        self.new_restore_path = self._json_param.get("job", {}).get("extendInfo", {}).get("newDatabasePath", "")
        if not self.new_restore_path:
            # Replication/archive jobs nest parameters one extendInfo deeper.
            self.new_restore_path = self._json_param.get("job", {}).get("extendInfo", {}). \
                get("extendInfo", {}).get("newDatabasePath", "")
        if not self.new_restore_path:
            # Fall back to the SQL Server instance's default data path.
            self.new_restore_path = self.sql_utils.get_restore_data_path()
        if self.new_restore_path.endswith(":"):
            log.error(f"New path is invalid, path: {self.new_restore_path}")
            raise Exception("New path is invalid")