import json
import math
import os.path
from vulcanus.log.log import LOGGER
from vulcanus.restful.resp import state
from zeus.operation_service.app.core.framework.common.constant import FileSize
from zeus.operation_service.app.core.file_util import FileUtil
from zeus.operation_service.app.constant import TaskOperationResultCode, DATA_COLLECT_PATH
from zeus.operation_service.app.core.asset.asset_exception import TaskException
from zeus.operation_service.app.core.framework.task.task_result.task_result_detail import TaskResultDetail
from zeus.operation_service.app.proxy.task import TaskProxy
from zeus.operation_service.app.proxy.asset import AssetProxy
from zeus.operation_service.app.proxy.host import HostProxy


class DataCollectResultDetail(TaskResultDetail):
    """Task-result detail handling for data-collection tasks.

    Resolves collected log files on disk under DATA_COLLECT_PATH, pages
    through their content, and renders the collected item tree for display.
    """

    def get_items_detail(self, data):
        """Return the detailed collect-item tree for one node/case pair.

        Args:
            data: dict with 'node_index' (index into task_detail's node_list)
                and 'case_index' (index into task_detail's case_list).

        Returns:
            (state.SUCCEED, list): the per-host collect items detail tree.
        """
        node_index = data['node_index']
        case_index = data['case_index']
        host_items_detail = self.generate_items_detail(node_index, case_index)
        return state.SUCCEED, host_items_detail

    def get_item_result(self, data):
        """Return one page of a single collected log file.

        Args:
            data: {
                "page_index": 1-based page number (defaults to 1),
                "item_path": path components of the log file,
                "node_index": index into task_detail's node_list,
                "case_index": index into task_detail's case_list,
            }

        Returns:
            (state.SUCCEED, {"total_page": int, "content": str}) on success,
            or (state.TASK_RESULT_NOT_FOUND, {}) when the file is missing.
        """
        item_path = self.get_item_path(data)
        LOGGER.info(f"get item path {item_path}")
        if not os.path.isfile(item_path):
            LOGGER.error(f"{item_path} not found")
            return state.TASK_RESULT_NOT_FOUND, {}
        # ceil() fixes the off-by-one of `int(size / READ_SIZE) + 1`, which
        # reported an extra empty page whenever the size was an exact multiple
        # of READ_SIZE; an empty file still counts as one (empty) page.
        total_page = max(1, math.ceil(os.path.getsize(item_path) / FileSize.READ_SIZE))
        # NOTE(review): page_index is not clamped to [1, total_page]; an
        # out-of-range page presumably yields empty content — confirm with
        # FileUtil.seek_file's contract.
        page_index = data.get("page_index", 1)
        content = FileUtil.seek_file(f"{item_path}", (page_index - 1) * FileSize.READ_SIZE)
        # Build the result in a fresh dict instead of shadowing the `data`
        # parameter as the original did.
        result = dict()
        result['total_page'] = total_page
        result['content'] = content
        return state.SUCCEED, result

    def download_single_item_result(self, data):
        """Resolve the on-disk path of one collected log file for download.

        Args:
            data: {
                "item_path": path components of the log file,
                "node_index": index into task_detail's node_list,
                "case_index": index into task_detail's case_list,
            }

        Returns:
            The file path to serve.

        Raises:
            TaskException: ERR_TASK_RESULT_FILE_NOT_FOUND when the file
                does not exist on disk.
        """
        item_path = self.get_item_path(data)
        if not os.path.isfile(item_path):
            LOGGER.error(f"get result file {item_path} failed")
            raise TaskException(TaskOperationResultCode.ERR_TASK_RESULT_FILE_NOT_FOUND)
        LOGGER.info(f"get result file:{os.path.basename(item_path)}")
        return item_path

    def update_progress(self, data):
        """Advance the persisted task progress by one finished sub-task.

        The finished count is recovered from the stored fractional progress;
        progress is capped at 0.99 because the fetch step still has to run
        after collection completes.

        Args:
            data: unused; kept for interface compatibility with siblings.
        """
        task_proxy = TaskProxy()
        now_progress = self.task.progress
        # round() recovers the integer number of finished sub-tasks — the raw
        # product accumulates float drift across repeated updates (2.9999...).
        finished_task_number = round(now_progress * self.task.task_total) + 1
        if self.task.task_total == 0:
            progress = 0.0
        else:
            progress = finished_task_number / self.task.task_total

        # Collection done != task done: hold at 0.99 until fetch completes.
        if math.isclose(progress, 1.0, rel_tol=1e-5) or progress > 1.0:
            progress = 0.99
        task_proxy.update_progress(self.task.task_id, progress)

    def generate_items_detail(self, node_index, case_index):
        """Build the collected-items detail tree for one node/case pair.

        Args:
            node_index: index into task_detail's node_list.
            case_index: index into task_detail's case_list.

        Returns:
            A list of label trees (one per collect item), empty when the
            node/case pair has no items.
        """
        items_detail = list()
        chose_case_node = dict()

        # Locate the case_node containing this node. Last match wins,
        # preserving the original behaviour if node indexes ever overlap.
        for case_node in self.task_detail.get('case_nodes'):
            if node_index in case_node['node_indexes']:
                chose_case_node = case_node

        # `or {}` guards the unmatched-node case: the original chained
        # .get() on None and raised AttributeError instead of returning
        # the documented empty list.
        case_items = (chose_case_node.get('case_indexes') or {}).get(str(case_index))
        if case_items is None or len(case_items) == 0:
            LOGGER.error(f"{case_index} items no exists")
            return items_detail
        LOGGER.warning(f"{case_index} get items {case_items}")

        case = self.task_detail.get('case_list')[case_index]
        asset_items = case.get('asset_items')
        asset_proxy = AssetProxy()
        for item_index in case_items:
            item = asset_items[item_index]
            item_id = item.get('id')
            # item_detail is stored as a JSON string on the asset item record.
            item_info = asset_proxy.get_asset_item_by_id(item_id)
            item_detail = json.loads(item_info.item_detail)
            data_collect_info = self.generate_collect_info(item_detail)
            items_detail.append(data_collect_info)
        return items_detail

    def generate_collect_info(self, item_detail: dict):
        """Dispatch to the renderer matching the collect type in item_detail.

        Args:
            item_detail: collect content of a single collect item.

        Returns:
            A label tree such as
            [{"label": "os", "children": [{"label": "disk"}]}, {"label": "cpu"}],
            or None when no known collect type key is present.
        """
        # Dispatch table — extend here when new collect types are added.
        collect_type = {
            "CollectDir": self.generate_collect_dir_info
        }
        for key, handler in collect_type.items():
            if key in item_detail:
                return handler(item_detail)
        return None

    @staticmethod
    def generate_collect_dir_info(item_detail: dict):
        """Render a 'CollectDir' item into a {"label", "children"} tree.

        COLLECTITEM entries arrive either as a single dict or a list of
        dicts (presumably a quirk of XML-to-dict conversion); both shapes
        are handled.
        """
        def generate_item_dict(collect_item):
            # '@is_dir' == 'true' marks a directory node with nested items.
            item_dict = dict()
            item_dict['label'] = collect_item.get('@name_en')
            if collect_item.get('@is_dir') == 'true':
                item_dict['children'] = generate_collect_item_detail(collect_item.get('COLLECTITEM'))
            return item_dict

        def generate_collect_item_detail(collect_items):
            if not collect_items:
                return list()
            if isinstance(collect_items, dict):
                # A single child deserializes as a bare dict; wrap it.
                return [generate_item_dict(collect_items)]
            if isinstance(collect_items, list):
                return [generate_item_dict(child) for child in collect_items]
            # Unknown shape: degrade to "no children", as the original did.
            return list()

        collect_detail = dict()
        collect_detail['label'] = item_detail.get('name').get('@name_en')
        collect_detail['children'] = generate_collect_item_detail(item_detail.get('CollectDir').get('COLLECTITEM'))
        return collect_detail

    def get_item_path(self, request_data):
        """Resolve the on-disk path of a collected log file.

        Args:
            request_data: {
                "item_path": path components of the log file,
                "node_index": index into task_detail's node_list,
                "case_index": index into task_detail's case_list,
            }

        Returns:
            The file path under DATA_COLLECT_PATH (with a '.txt' suffix).

        Raises:
            TaskException: ERR_TASK_RESULT_NEED_DOWNLOAD when neither the
                host nor the asset directory can be resolved.
        """
        host_id = self.node_list[request_data['node_index']]['host_id']
        case_id = self.case_list[request_data['case_index']]['asset_id']
        hostname = ''
        asset_name = ''
        LOGGER.info(f"get item path: host_id: {host_id}, asset_id:{case_id}")
        host = HostProxy().get_host_by_id(host_id)
        if host:
            hostname = "_".join([host.get("host_ip"), self.task.task_name])
        elif self.node_list[request_data['node_index']].get('ip'):
            # Host record gone (e.g. deleted) — fall back to the IP captured
            # in the task detail snapshot.
            hostname = self.node_list[request_data['node_index']].get('ip')
        asset = AssetProxy().get_asset_by_asset_id(case_id)
        if asset:
            asset_name = asset.asset_dir
        elif self.case_list[request_data['case_index']].get('asset_dir'):
            asset_name = self.case_list[request_data['case_index']]['asset_dir']
        if hostname == '' or asset_name == '':
            raise TaskException(TaskOperationResultCode.ERR_TASK_RESULT_NEED_DOWNLOAD)
        # dmesg results live under an extra 'logGPT' directory level.
        # NOTE(review): the insert mutates the caller's request_data['item_path'].
        # The original duplicated the identical path expression in both
        # branches; build it once after the conditional insert.
        if len(request_data['item_path']) > 1 and "dmesg" in request_data['item_path']:
            request_data['item_path'].insert(1, 'logGPT')
        item_path = f"{os.path.join(DATA_COLLECT_PATH, self.task.task_id, hostname, asset_name, *request_data['item_path'])}.txt"
        LOGGER.info(f"find item result file {item_path}")
        return item_path
