import numpy as np
# from sklearn.model_selection import train_test_split
# from sklearn.preprocessing import StandardScaler
# from sklearn.metrics import accuracy_score, f1_score
import pefile
import pickle

# Pre-trained feature scaler and classifier, loaded once at import time.
# NOTE(review): pickle.load executes arbitrary code from the file — these
# model files must not be writable by untrusted parties.
# Fixed: the original opened both files without ever closing them.
with open("./static/model/pe/pe_scaler.pkl", "rb") as _scaler_file:
    scaler = pickle.load(_scaler_file)
with open('./static/model/pe/pe_model.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)


def get_section_size(pe, section_name):
    """Return SizeOfRawData of the named section, or 0 if absent.

    Section names are decoded with errors='ignore' because packed or
    malformed PE files (common among malware samples) may contain
    non-UTF-8 bytes in section names, which previously raised
    UnicodeDecodeError here.
    """
    for section in pe.sections:
        # Section names are NUL-padded to 8 bytes in the PE header.
        name = section.Name.decode(errors='ignore').rstrip('\x00')
        if name == section_name:
            return section.SizeOfRawData
    return 0


def process(file):
    """Classify raw PE bytes: returns (prediction, [static, api, dll])."""
    features = extract_features(file)
    static, api, dll = features
    prediction = model_predict(static)
    return prediction, [static, api, dll]


def process_string(features):
    """Classify from already-extracted features (static, api, dll) triple."""
    static = features[0]
    api = features[1]
    dll = features[2]
    return model_predict(static), [static, api, dll]


def model_predict(static):
    """Scale the static feature vector and return the model's class label.

    Uses the module-level `scaler` and `model` loaded at import time.
    The vector is wrapped in a list because the scaler/model expect a
    2-D (n_samples, n_features) input; prediction for the single sample
    is returned.
    """
    # StandardScaler.transform already returns an ndarray, so the
    # original's extra np.array(...) re-wrap was redundant and removed.
    scaled = scaler.transform([np.array(static)])
    return model.predict(scaled)[0]


def extract_features(file):
    """Parse raw PE bytes and return (static, api, dll) feature groups."""
    parsed = pefile.PE(data=file)
    static_features = get_static(parsed, file)
    imported_apis = get_api(parsed)
    imported_dlls = get_dll(parsed)
    return static_features, imported_apis, imported_dlls


def get_dll(pe):
    """Return the decoded names of all DLLs the PE imports (may be empty)."""
    if not hasattr(pe, 'DIRECTORY_ENTRY_IMPORT'):
        return []
    return [entry.dll.decode() for entry in pe.DIRECTORY_ENTRY_IMPORT]


def get_api(pe):
    """Return decoded names of all imported functions across every DLL.

    Ordinal-only imports (imp.name is None) are skipped; if the PE has
    no import directory an empty list is returned.
    """
    names = []
    for entry in getattr(pe, 'DIRECTORY_ENTRY_IMPORT', []):
        for imp in entry.imports:
            if imp.name is not None:
                names.append(imp.name.decode())
    return names


def get_static(pe, file):
    """Build the fixed-length static feature vector consumed by the model.

    The order and count of appends must match exactly what the scaler and
    classifier were trained on — do not reorder or deduplicate entries.

    Args:
        pe:   parsed pefile.PE object.
        file: raw file bytes (only its length is used).
    """
    features = []
    features.append(len(file))  # total file size in bytes
    # Machine type: 0 for IMAGE_FILE_MACHINE_I386 (x86), 1 otherwise.
    features.append(0 if pe.FILE_HEADER.Machine == 0x14c else 1)
    features.append(pe.OPTIONAL_HEADER.AddressOfEntryPoint)
    features.append(pe.FILE_HEADER.NumberOfSections)
    features.append(get_section_size(pe, '.text'))
    features.append(get_section_size(pe, '.rsrc'))
    features.append(get_section_size(pe, '.data'))
    # Decode each section name once; errors='ignore' guards against
    # non-UTF-8 bytes in section names of packed/malformed samples,
    # which previously raised UnicodeDecodeError.
    section_names = [
        section.Name.decode(errors='ignore').rstrip('\x00')
        for section in pe.sections
    ]
    # Number of sections beyond the three standard ones above.
    features.append(
        len([n for n in section_names if n not in ('.text', '.data', '.rsrc')]))
    # Total number of imported functions across all DLLs.
    features.append(
        sum(len(entry.imports) for entry in pe.DIRECTORY_ENTRY_IMPORT)
        if hasattr(pe, 'DIRECTORY_ENTRY_IMPORT') else 0)
    # Number of exported symbols.
    features.append(
        len(pe.DIRECTORY_ENTRY_EXPORT.symbols)
        if hasattr(pe, 'DIRECTORY_ENTRY_EXPORT') else 0)
    # Number of imported DLLs.
    features.append(
        len(pe.DIRECTORY_ENTRY_IMPORT)
        if hasattr(pe, 'DIRECTORY_ENTRY_IMPORT') else 0)
    # NOTE(review): exported-symbol count appears a second time here, exactly
    # as in the original. It looks like a duplicate, but the trained model
    # expects this exact vector layout, so it is kept deliberately.
    features.append(
        len(pe.DIRECTORY_ENTRY_EXPORT.symbols)
        if hasattr(pe, 'DIRECTORY_ENTRY_EXPORT') else 0)
    # Number of top-level resource directory entries.
    features.append(
        len(pe.DIRECTORY_ENTRY_RESOURCE.entries)
        if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE') else 0)
    features.append(pe.OPTIONAL_HEADER.SizeOfStackReserve)
    features.append(pe.OPTIONAL_HEADER.MajorOperatingSystemVersion)
    features.append(pe.OPTIONAL_HEADER.ImageBase)
    # Size of the base-relocation data directory.
    features.append(pe.OPTIONAL_HEADER.DATA_DIRECTORY[
        pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_BASERELOC']].Size)
    return features


if __name__ == '__main__':
    # Smoke-test the pipeline against a known-bad sample.
    sample_path = "../samples/bad_t302.exe"
    with open(sample_path, "rb") as sample:
        file = sample.read()
    print(process(file))
