import ctypes as ct
from bcc import BPF
from dataclasses import dataclass
from multiprocessing import cpu_count

from GlobalVariableodule import GlobalVariableodule
from python_part.Util import getInode
from python_part.main_policy_database_member.ReadWriteClass import ReadWriteClass
from python_part.main_policy_database_member.ReadWriteClassKernel import ReadWriteClassKernel
from python_part.main_policy_database_member.struct_for_connect import LogMessageKernel, ClassModifyEventKernel, \
    ClassDeleteEventKernel, ClassNewEventKernel

from GlobaLogger import Log
# Module-level logger shared by everything in this file.
log = Log(__name__).getlog()

# bpftool map dump name path_to_read_write_class_id
# bpftool map dump id 78
# bpftool map list

# Mirror the project-wide kernel limits locally so the generated C code and
# the ctypes structs below stay in sync with the eBPF side.
FILE_NAME_MAX  = GlobalVariableodule.FILE_NAME_MAX
MAX_FOLDER_DEPTH  = GlobalVariableodule.MAX_FOLDER_DEPTH
@dataclass(slots=True)
class FileSearchKey:
    located_directory_id: int
    raw_str: str
    def __hash__(self):
        return hash((self.located_directory_id, self.raw_str))
    def __eq__(self, other):
        if not isinstance(other, FileSearchKey):
            return False
        return self.located_directory_id == other.located_directory_id and self.raw_str == other.raw_str

class FileSearchKeyKernel(ct.Structure):
    # ctypes mirror of the C `struct file_search_key` generated below in
    # load_entity_path_to_rwclass_id. Field order and types must match the
    # C struct byte-for-byte, since this is written straight into a BPF map.
    _fields_ = [("raw_str", ct.c_char * FILE_NAME_MAX),
                ("located_directory_id", ct.c_int),]

@dataclass(slots=True)
class FileSearchValue:
    my_id: int
    my_read_write_class_id: int
    def __hash__(self):
        return hash((self.my_id, self.my_read_write_class_id))
    def __eq__(self, other):
        if not isinstance(other, FileSearchValue):
            return False
        return self.my_id == other.my_id and self.my_read_write_class_id == other.my_read_write_class_id

class FileSearchValueKernel(ct.Structure):
    """ctypes mirror of the C `struct file_search_value`.

    Field order and types must match the generated C struct exactly,
    as instances are written directly into the BPF sub-maps.
    """
    _fields_ = [
        ("my_id", ct.c_int),
        ("my_read_write_class_id", ct.c_int),
    ]


class BPFMapLoader:
    """Loads the userspace policy database into the kernel's eBPF maps.

    Responsibilities:
      * convert :class:`ReadWriteClass` objects to their ctypes kernel form,
      * build the (parent id, component name) -> (file id, rwclass id) table
        and shard it into the 128 sub-maps of
        ``parent_of_path_to_read_write_class_id``,
      * seed the remaining policy and bookkeeping maps.
    """

    def __init__(self):
        pass

    @staticmethod
    def _pack_class_ids(bitmap, class_ids):
        # Pack a set of class ids into an array of u64 bitmap words:
        # bit (class_id % 64) of word (class_id // 64).
        for class_id in class_ids:
            word_index = class_id >> 6          # class_id // 64
            bit_index = class_id & 0b111111     # class_id % 64
            bitmap[word_index] |= 1 << bit_index

    def convert_read_write_class_to_kernel(self, input_class: ReadWriteClass) -> ReadWriteClassKernel:
        """Convert a userspace ReadWriteClass into its kernel ctypes struct.

        The sets of class ids allowed to read/write are packed into the
        struct's u64 bitmaps so the kernel side can test membership with a
        single bit operation.
        """
        class_in_kernel = ReadWriteClassKernel()
        class_in_kernel.owner_class_id = ct.c_int(input_class.owner_class_id)
        self._pack_class_ids(class_in_kernel.class_ids_can_read_this,
                             input_class.class_ids_can_read_this)
        self._pack_class_ids(class_in_kernel.class_ids_can_write_this,
                             input_class.class_ids_can_write_this)
        return class_in_kernel

    def load_entity_path_to_rwclass_id(self, bpf_obj, entity_path_to_rwclass_id):
        """Load the path -> rwclass-id policy into the kernel map-in-map.

        Walks every policy path component by component, assigns each distinct
        inode a file id, and records (parent file id, component name) ->
        (file id, rwclass id) entries. The leaf component carries the path's
        rwclass id; intermediate directories get rwclass id 0 unless some
        other path already claimed them.

        Side effects: writes ``MapLoader.c``, compiles it with BCC (keeping
        the BPF handle in ``self.tmp_b``), fills the 128 sub-maps, and sets
        ``self.now_free_file_id``.
        """
        # 1. Build the big userspace hash table.
        big_hash_table = {}
        inode_num_to_file_id = {}
        now_free_file_id = 1

        for entity_path, rwclass_id in entity_path_to_rwclass_id.items():
            # Drop empty components up front so duplicate '/' and a trailing
            # '/' cannot produce phantom path elements.
            components = [c for c in entity_path.split('/') if c]
            last_index = len(components) - 1
            tmp_path = "/"
            located_directory_id = 0
            for index, file_name in enumerate(components):
                if tmp_path == "/":
                    tmp_path += file_name
                else:
                    tmp_path += '/' + file_name
                inode_num = getInode(tmp_path)
                if inode_num not in inode_num_to_file_id:
                    inode_num_to_file_id[inode_num] = now_free_file_id
                    now_free_file_id += 1

                now_file_id = inode_num_to_file_id[inode_num]
                key = FileSearchKey(located_directory_id, file_name)
                # BUGFIX: the leaf used to be detected with
                # `file_name == file_names_in_path[-1]`, i.e. by VALUE. That
                # mis-classified an intermediate directory as the leaf when
                # its name equalled the last component (e.g. /usr/bin/usr),
                # and never matched for paths ending in '/'. Compare by
                # position instead.
                if index == last_index:
                    big_hash_table[key] = FileSearchValue(now_file_id, rwclass_id)
                elif key not in big_hash_table:
                    # Intermediate directory with no policy of its own.
                    big_hash_table[key] = FileSearchValue(now_file_id, 0)
                located_directory_id = now_file_id
        self.now_free_file_id = now_free_file_id
        print(f"向内核中载入的文件数: {big_hash_table}")

        # 2. Load the big table into the kernel maps.
        # 2.1. Initialize the map-in-map.
        # 2.1.1. Generate an eBPF program declaring the 128 sub hash maps;
        #        compiling it is what actually creates the maps.
        with open('MapLoader.c', 'w+') as file:
            file.write(f"""
                    #define FILE_NAME_MAX {FILE_NAME_MAX}
                    #define MAX_FOLDER_DEPTH {MAX_FOLDER_DEPTH}

                    // 文件路径 到 read_write_class_id 的映射
                    struct file_search_key {{
                        char raw_str[FILE_NAME_MAX];
                        int located_directory_id;
                    }};
                    struct file_search_value {{
                        int my_id;
                        int my_read_write_class_id;
                    }};
                    """)

            for i in range(128):
                sub_hash_table_name = f"th{i}_path_to_read_write_class_id"
                file.write(f"BPF_HASH({sub_hash_table_name}, struct file_search_key, struct file_search_value);\n")

        tmp_b = BPF(src_file="MapLoader.c")
        # Keep the BPF handle alive on self: dropping it would close the
        # sub-map fds we are about to hand to the kernel.
        self.tmp_b = tmp_b
        # 2.1.2. Plug each sub-map fd into parent_of_path_to_read_write_class_id.
        hash_maps = bpf_obj.get_table(b"parent_of_path_to_read_write_class_id")
        for i in range(128):
            sub_map = tmp_b.get_table(f"th{i}_path_to_read_write_class_id")
            hash_maps[ct.c_int(i)] = ct.c_int(sub_map.get_fd())

        # 2.2. Copy every entry into its sub-map. Entries are sharded by the
        #      low 7 bits of the parent directory id (128 sub-maps).
        for file_search_key, file_search_value in big_hash_table.items():
            kernel_key = FileSearchKeyKernel()
            kernel_key.located_directory_id = file_search_key.located_directory_id
            kernel_key.raw_str = bytes(file_search_key.raw_str, encoding="utf8")

            kernel_value = FileSearchValueKernel()
            kernel_value.my_id = file_search_value.my_id
            kernel_value.my_read_write_class_id = file_search_value.my_read_write_class_id

            which_sub_map = file_search_key.located_directory_id & 0x7f
            sub_map = tmp_b.get_table(f"th{which_sub_map}_path_to_read_write_class_id")
            sub_map[kernel_key] = kernel_value

    def load_all_policy_to_bpf_map(self, bpf_obj, all_policy: tuple[dict, dict, dict]):
        """Load the complete policy into bpf_obj's maps.

        ``all_policy`` is (entity_path_to_rwclass_id, rwclass_id_to_rwclass,
        global_config_dict). Also initializes the dynamic-class maps, the
        per-CPU free-id stacks, the front/back-end event caches and the
        allocator counters.
        """
        # 1. Load entity_path_to_rwclass_id.
        entity_path_to_rwclass_id = all_policy[0]
        self.load_entity_path_to_rwclass_id(bpf_obj, entity_path_to_rwclass_id)

        # 2. Load rwclass_id_to_rwclass, tracking the highest id seen so the
        #    kernel-side allocator can continue from there.
        rwclass_id_to_rwclass: dict[int, ReadWriteClass] = all_policy[1]
        self.now_free_read_write_class_id = 0
        for rwclass_id, rwclass in rwclass_id_to_rwclass.items():
            self.now_free_read_write_class_id = max(self.now_free_read_write_class_id, rwclass_id)
            class_need_in = self.convert_read_write_class_to_kernel(rwclass)
            bpf_obj["class_id_to_read_write_class"][ct.c_int(rwclass_id)] = class_need_in
        # 3. Load global_config_dict (int key -> int value).
        global_config_dict = all_policy[2]
        for int_key, int_value in global_config_dict.items():
            bpf_obj["global_variable"][ct.c_int(int_key)] = ct.c_int(int_value)
        # 4. Initialize class_id_to_dynamic_read_write_class with empty entries.
        for i in range(10240):
            bpf_obj["class_id_to_dynamic_read_write_class"][ct.c_int(i)] = ReadWriteClassKernel()
        # 5. Initialize the free_dynamic_class_id per-CPU stacks.
        # NOTE(review): cpu_count() was computed here but never used — the
        # sharding below is hard-coded to 4 stacks (`cpu{i % 4}_...`).
        # Confirm whether it should be `i % cpu_num` and whether the BPF
        # program declares one stack per CPU; behavior kept as-is.
        cpu_num = cpu_count()
        for i in range(10240):
            now_stack = bpf_obj[f"cpu{i % 4}_free_dynamic_class_id"]
            now_stack.push(ct.c_int(i))
        # 6. Initialize the frontend/backend communication caches.
        bpf_obj['log_message_cache'][ct.c_int(0)] = LogMessageKernel()
        bpf_obj['class_new_event_event_cache'][ct.c_int(0)] = ClassNewEventKernel()
        bpf_obj['class_delete_event_cache'][ct.c_int(0)] = ClassDeleteEventKernel()
        # 7. filepath_cache initializes itself — nothing to do.
        # 8. Seed the id allocators one past the highest id already in use.
        bpf_obj['just_for_alloc_read_write_class_id'][ct.c_int(0)] = ct.c_int(self.now_free_read_write_class_id + 1)
        log.info(f"just_for_alloc_read_write_class_id:{self.now_free_read_write_class_id+1}")
        bpf_obj['just_for_alloc_file_id'][ct.c_int(0)] = ct.c_int(self.now_free_file_id + 1)
        log.info(f"just_for_alloc_file_id:{self.now_free_file_id+1}")


