import fire
import sys
import logging

from enum import Enum

import os

import tasks.bvd.bvd_run as bvd_run
import tasks.bvd.bvd_visual as bvd_visual

import features.asm_note_generate as generate

import features.angr_adapter as adapter

# import scripts.simple_merge as simple_merge

import data_augmentation.augment as augment



def bvd_train_model(model_type:str, model_dir:str, dataset_dir:str, annotation_dir=None, annotation_usage='repeat', graph_save_file=None, dataset_class_save_file=None, log_file=None, seed=None):
    '''
    Train a model on a single dataset.
    ==============================================================================
    model_type: neural network type to train ('gcn'/'transformer'/'lstm')
    model_dir: path of the model file saved after training (an existing model is
               trained further; otherwise a new model is created)
    dataset_dir: input dataset directory, containing cfg_graph.json etc.
    annotation_dir: (optional) annotation directory containing the dataset's
                    annotation json files
    annotation_usage: (optional) how annotations are used; default 'repeat'
                      repeats training on the annotated parts, 'vote' uses
                      majority voting, 'all' applies both
    log_file: (optional) path of the training log file
    seed: (optional) random seed for the dataset split; unset by default
    graph_save_file: (optional) intermediate file caching the cfg/graph
                     structures built from the input dataset
    dataset_class_save_file: (optional) intermediate file caching the merged
                             graph dataset converted to a Dataset object
    ==============================================================================
    '''

    # 1. Dir check & init
    # dataset_dir must be organized in the expected format beforehand; the other
    # path arguments may be chosen freely. Except for dataset_dir, any missing
    # file is created at runtime. Note: arguments may point to non-existent
    # files, but must not point to unrelated files, or the program may crash.
    if not os.path.exists(dataset_dir):
        print('[ERROR] Dataset dir not found')
        sys.exit(1)  # non-zero status: this is an error, not a normal exit
    if annotation_dir is not None and not os.path.exists(annotation_dir):
        print('[ERROR] Annotation dir not found')
        sys.exit(1)
    # Model name = model file basename without its '.pt' suffix.
    model_name = model_dir.split('/')[-1].split('.pt')[0]
    # Default the cache/log paths from the model name. os.makedirs is portable
    # and race-free, unlike shelling out to `mkdir -p`.
    if graph_save_file is None:
        os.makedirs('caches_data/graph_save', exist_ok=True)
        graph_save_file = 'caches_data/graph_save/' + model_name + '.pkl'
    if dataset_class_save_file is None:
        os.makedirs('caches_data/dataset_class', exist_ok=True)
        dataset_class_save_file = 'caches_data/dataset_class/' + model_name + '.pkl'
    if log_file is None:
        os.makedirs('logs/train_model', exist_ok=True)
        log_file = 'logs/train_model/' + model_name + '.txt'

    print('Training model:', model_name)
    print('Model type:', model_type)
    print('Dataset:', dataset_dir)
    print('LET\'S GO!')

    # 2. Model training
    if model_type == 'gcn':
        bvd_run.run_gcn_single_dataset(dataset_dir, dataset_class_save_file, graph_save_file, model_dir, log_file, seed, annotation_dir, annotation_usage)
    elif model_type == 'transformer':
        # TODO annotation usage not implemented yet
        bvd_run.run_transformer_single_dataset(dataset_dir, dataset_class_save_file, graph_save_file, model_dir, log_file, seed, annotation_dir)
    elif model_type == 'lstm':
        # TODO annotation usage not implemented yet
        bvd_run.run_lstm_single_dataset(dataset_dir, dataset_class_save_file, graph_save_file, model_dir, log_file, seed, annotation_dir)
    else:
        print('[ERROR] Input unknown model type')
        sys.exit(1)

    # Space added so the message reads "<name> train finished!" (matches the
    # visual-test message format).
    print(model_name + ' train finished!')


# def bvd_train_model(model_name:str,dataset_dir,model_file:str,log_file:str,dataset_save_pkl:str=None):
#      # Create the folder when model_file's parent directory does not exist
#       if not os.path.exists(os.path.dirname(model_file)):
#           os.makedirs(os.path.dirname(model_file))
    
#       if dataset_save_pkl is None:
#           dataset_save_pkl='caches_data/'+os.path.basename(dataset_dir)+'.pkl'
    

def bvd_visual_model(model_type:str, model_dir:str, dataset_dir:str, vis_json_output_dir:str, graph_save_file=None, dataset_class_save_file=None, log_file=None, sample_num=None, sample_file_list=None, vis_info_dir=None, vis_node_con_dir=None):
    '''
    Visualization interface for a model on a single dataset.
    ==============================================================================
    model_type: neural network type to visualize ('gcn'/'transformer'/'lstm')
    model_dir: path of the model file to run visualization tests on
    dataset_dir: input dataset directory, containing cfg_graph.json etc.
    vis_json_output_dir: output directory for the per-sample explanation json files
    log_file: (optional) path of the visualization log file
    sample_file_list: (optional) json file naming the specific samples the model
                      must explain; must be consistent with sample_num
    sample_num: (optional) number of good/bad samples to pick; if unset,
                explanations are generated for the whole dataset
    graph_save_file: (optional) intermediate file caching the cfg/graph
                     structures built from the input dataset
    dataset_class_save_file: (optional) intermediate file caching the merged
                             graph dataset converted to a Dataset object
    vis_info_dir: (optional) intermediate directory with detailed info on the
                  model decision and every node's contribution to it
    vis_node_con_dir: (optional) intermediate directory keeping only per-node
                      contribution info, from which the json output is produced
    ==============================================================================
    '''

    # 1. Dir check & init
    # dataset_dir must be organized in the expected format beforehand; model_dir
    # must be an existing model; vis_json_output_dir receives one json per
    # sample. Any other missing file/directory is created at runtime. Note:
    # arguments may point to non-existent files, but must not point to
    # unrelated files, or the program may crash.
    if not os.path.exists(dataset_dir):
        print('[ERROR] Dataset dir not found')
        sys.exit(1)  # non-zero status: this is an error, not a normal exit
    # os.makedirs is portable, creates nested parents (the old plain `mkdir`
    # could not), and avoids passing a caller-supplied path through a shell.
    os.makedirs(vis_json_output_dir, exist_ok=True)
    # Model name = model file basename without its '.pt' suffix.
    model_name = model_dir.split('/')[-1].split('.pt')[0]
    if graph_save_file is None:
        os.makedirs('caches_data/graph_save', exist_ok=True)
        graph_save_file = 'caches_data/graph_save/' + model_name + '.pkl'
    if dataset_class_save_file is None:
        os.makedirs('caches_data/dataset_class', exist_ok=True)
        dataset_class_save_file = 'caches_data/dataset_class/' + model_name + '.pkl'
    if log_file is None:
        os.makedirs('logs/visual_model', exist_ok=True)
        log_file = 'logs/visual_model/' + model_name + '.txt'
    if vis_info_dir is None:
        vis_info_dir = 'caches_data/vis_info/' + model_name + '/'
        os.makedirs(vis_info_dir, exist_ok=True)
    if vis_node_con_dir is None:
        vis_node_con_dir = 'caches_data/vis_node_con/' + model_name + '/'
        os.makedirs(vis_node_con_dir, exist_ok=True)
    if sample_file_list is None:
        os.makedirs('caches_data/sample_file_list', exist_ok=True)
        sample_file_list = 'caches_data/sample_file_list/' + model_name + '.json'

    print('Visual testing on model:', model_name)
    print('Model type:', model_type)
    print('Dataset:', dataset_dir)
    print('LET\'S GO!')

    # 2. Visual test
    if model_type == 'gcn':
        bvd_visual.visual_gcn_mod(dataset_dir, dataset_class_save_file, graph_save_file, model_dir, vis_info_dir, vis_node_con_dir, log_file, sample_num, sample_file_list, vis_json_output_dir)
    elif model_type == 'transformer':
        bvd_visual.visual_transformer_mod(dataset_dir, dataset_class_save_file, graph_save_file, model_dir, vis_info_dir, vis_node_con_dir, log_file, sample_num, sample_file_list, vis_json_output_dir)
    elif model_type == 'lstm':
        bvd_visual.visual_lstm_mod(dataset_dir, dataset_class_save_file, graph_save_file, model_dir, vis_info_dir, vis_node_con_dir, log_file, sample_num, sample_file_list, vis_json_output_dir)
    else:
        print('[ERROR] Input unknown model type')
        sys.exit(1)

    print(model_name + ' visual test finished!')


class ModelName(Enum):
    """Known model types; values match the `model_type` strings used above."""
    Transformer = 'transformer'
    GCN = 'gcn'
    # Added for consistency: the train/visual interfaces also accept 'lstm'.
    LSTM = 'lstm'


# def bvd_train_model(model_name:str,dataset_dir,model_file:str,log_file:str,dataset_save_pkl:str=None):
#     # Create the folder when model_file's parent directory does not exist
#     if not os.path.exists(os.path.dirname(model_file)):
#         os.makedirs(os.path.dirname(model_file))
    
#     if dataset_save_pkl is None:
#         dataset_save_pkl='caches_data/'+os.path.basename(dataset_dir)+'.pkl'
    
#     if model_name==ModelName.GCN:
#             bvd_run.run_gcn_single_dataset(dataset_save_pkl,dataset_dir,model_file,None,log_file)
#     elif model_name==ModelName.Transformer:
#             bvd_run.run_transformer_single_dataset(dataset_save_pkl,dataset_dir,model_file,None,log_file)
            
            
def note(input_dir: str, output_dir: str, ida_path: str, script_path: str):
    """
    Interface for generating asm annotations.

    input_dir (str): input directory containing the binaries to process, down to
        the directory directly above the binary files.
    output_dir (str): output directory for the processing results.
    ida_path (str): path of the IDA Pro tool; IDA_Pro_7.5_SP3 is recommended.
    script_path (str): path of the IDA script (inline_determine.py).
    """
    generate.run_all(input_dir, output_dir, ida_path, script_path)
   
# Interface for generating CFGs with angr
# ============================================================================== 
# - input_dir (str): input directory, the folder containing the binaries to process.
# - output_dir (str): output directory for the processing results.
# - graph_types (list): graph types to generate, e.g. ['cfg', 'cdg', 'ddg', 'cg', 'all']; all graphs by default.

# ============================================================================== 
def angr(input_dir: str, output_dir: str, graph_types: list = None):
    """
    Generate CFG-family graphs for binaries using angr.

    input_dir (str): input directory, the folder containing the binaries to process.
    output_dir (str): output directory for the processing results.
    graph_types (list): graph types to generate, e.g.
        ['cfg', 'cdg', 'ddg', 'cg', 'all']; defaults to all graphs.
    """
    # None sentinel instead of a mutable default list (['all']), which would be
    # shared across calls; effective default behavior is unchanged.
    if graph_types is None:
        graph_types = ['all']
    adapter.batch_processing(input_dir, output_dir, graph_types)


# def merge(benign_dir: str, juliet_dir: str):
    
#     simple_merge.simple_merge(benign_dir, juliet_dir)

def data_augmentation(vulnerable_code_dataset:str, secure_code_dataset:str, augmented_code_dir:str):
    '''
    Data augmentation interface.
    ==============================================================================
    vulnerable_code_dataset: path of the vulnerable-code dataset
    secure_code_dataset: path of the secure-code dataset
    augmented_code_dir: output path for the augmented code dataset
    ==============================================================================
    '''
    augment.data_augmentation_main(vulnerable_code_dataset, secure_code_dataset, augmented_code_dir)

if __name__ == "__main__":

    # Ad-hoc driver: past runs are kept below as commented command history;
    # only the single uncommented call is currently live. Enabling fire.Fire()
    # would expose the functions above as a CLI instead.
    # fire.Fire()
    # bvd_visual_model('gcn', r'mods/addon_test/CWE121_Stack_Based_Buffer_Overflow_O3_s.pt', r'preprocessed_data/addon_test/addon_juliet_dataset/CWE121_Stack_Based_Buffer_Overflow_O3_s', r'caches_data/vis_json_output_dir/CWE121_Stack_Based_Buffer_Overflow_O3_s/')


    # bvd_train_model('gcn', r'mods/with_notation/O0_s_train_with_notation_test_repeat.pt', r'preprocessed_data/addon_test/addon_juliet_dataset/CWE121_Stack_Based_Buffer_Overflow_O0_s', r'preprocessed_data/notation/CWE121_Stack_Based_Buffer_Overflow_O0',annotation_usage='repeat',seed=0)
    
    # bvd_train_model('gcn', r'mods/with_notation/O0_s_train_with_notation_test_repeat_with_random.pt', r'preprocessed_data/addon_test/addon_juliet_dataset/CWE121_Stack_Based_Buffer_Overflow_O0_s',r'preprocessed_data/notation/CWE121_Stack_Based_Buffer_Overflow_O0',annotation_usage='repeat',seed=0)



    # Annotation-usage command history
    # bvd_train_model('gcn', r'mods/with_notation/O0_s_train_with_notation_test_repeat_with_random_0.5_no_delete_node_new.pt', r'preprocessed_data/addon_test/addon_juliet_dataset/CWE121_Stack_Based_Buffer_Overflow_O0_s',r'preprocessed_data/notation/CWE121_Stack_Based_Buffer_Overflow_O0',annotation_usage='repeat',seed=0)
    # bvd_visual_model('gcn', r'mods/with_notation/O0_s_train_with_notation_test_repeat_with_random_0.5_no_delete_node_new.pt', r'preprocessed_data/addon_test/addon_juliet_dataset/CWE121_Stack_Based_Buffer_Overflow_O0_s', r'caches_data/vis_json_output_dir/O0_s_train_with_notation_test_repeat_with_random_0.5_no_delete_node_new/')

    # bvd_train_model('lstm', r'mods/lstm/O0_s_train_with_lstm.pt', r'preprocessed_data/addon_test/addon_juliet_dataset/CWE121_Stack_Based_Buffer_Overflow_O0_s', seed=0)
    # bvd_train_model('transformer', r'mods/transformer/O0_s_train_with_transformer.pt', r'preprocessed_data/addon_test/addon_juliet_dataset/CWE121_Stack_Based_Buffer_Overflow_O0_s', seed=0)
    # bvd_visual_model('lstm', r'mods/lstm/O0_s_train_with_lstm.pt', r'preprocessed_data/addon_test/addon_juliet_dataset/CWE121_Stack_Based_Buffer_Overflow_O0_s', r'caches_data/vis_json_output_dir/O0_s_train_with_lstm/')
    bvd_visual_model('transformer', r'mods/transformer/O0_s_train_with_transformer.pt', r'preprocessed_data/addon_test/addon_juliet_dataset/CWE121_Stack_Based_Buffer_Overflow_O0_s', r'caches_data/vis_json_output_dir/O0_s_train_with_transformer/')



    # bvd_visual_model('gcn', r'mods/with_notation/O0_s_train_with_notation.pt', r'preprocessed_data/addon_test/addon_juliet_dataset/CWE121_Stack_Based_Buffer_Overflow_O0_s', r'caches_data/vis_json_output_dir/O0_s_train_with_notation/')
    # for s0i in os.listdir(r'D:\zz_static\zz_static_project\dataset\CWE121_Stack_Based_Buffer_Overflow_O0'):

    #     s0i_path = os.path.join(r'D:\zz_static\zz_static_project\dataset\CWE121_Stack_Based_Buffer_Overflow_O0', s0i)
    #     s0i_bad_path = os.path.join(s0i_path, 'bad')
    #     s0i_good_path = os.path.join(s0i_path, 'good')
    #     output_path = os.path.join(r'D:\zz_static\zz_static_project\preprocessed_data\O0\bad')
    #     note(s0i_bad_path, output_path, r'D:\IDA_Pro_7.5_zz', r'D:\zz_static\zz_static_project\src\features\scripts\inline_determine.py')
    #     output_path = os.path.join(r'D:\zz_static\zz_static_project\preprocessed_data\O0\good')
    #     note(s0i_good_path, output_path, r'D:\IDA_Pro_7.5_zz', r'D:\zz_static\zz_static_project\src\features\scripts\inline_determine.py')
    # bvd_train_model('gcn', r'preprocessed_data/notation/test/annotation_test_v1.pt', r'preprocessed_data/addon_test/addon_juliet_dataset/CWE121_Stack_Based_Buffer_Overflow_O0_s', annotation_dir=r'preprocessed_data/notation/O0', log_file=r'preprocessed_data/notation/test/log.txt', seed=100)