from utils import *
from features import PEFeatureExtractor
from elftools.elf.elffile import ELFFile
import yaml
from tqdm import tqdm
import pefile
import shutil
import hashlib
import time
# from memory_profiler import profile

# Load the project-wide configuration once at import time (module-level side effect).
# NOTE(review): hard-coded absolute path — breaks on any other machine; consider an
# environment variable or CLI argument instead. TODO confirm with the config owner.
yaml_file = "/home/cyw/projects/malware_detected/configue.yaml"
with open(yaml_file, 'r') as f:
    config = yaml.safe_load(f)  # safe_load: never executes arbitrary YAML tags
savePath = config["dataConfig"]["savePath"]  # where intermediate/output data is written
dataPath = config["dataConfig"]["dataPath"]  # where the raw sample data lives

class FileProcess:
    """
    Utilities for curating a folder of samples:
    1. detect file type (PE / ELF / other)
    2. copy files between folders
    3. de-duplicate files by content hash
    4. rename files to their MD5 digest
    """
    def file_md5(self, filename, block_size=65536):
        """Return the hex MD5 digest of *filename*, read in block_size chunks."""
        hasher = hashlib.md5()
        with open(filename, 'rb') as f:
            # iter(callable, sentinel) stops at EOF without loading the whole file
            for block in iter(lambda: f.read(block_size), b''):
                hasher.update(block)
        return hasher.hexdigest()

    def rename_by_md5(self, originPath, savePath):
        """
        Copy every file under originPath into savePath and rename each copy
        to its MD5 digest.

        NOTE: files with identical content collapse to the same name, so a
        later copy silently overwrites an earlier one.
        """
        if not os.path.exists(savePath):
            # makedirs (was mkdir) so a missing parent directory doesn't crash
            os.makedirs(savePath)
        for file in tqdm(os.listdir(originPath)):
            filePath = os.path.join(originPath, file)
            tarFilePath = os.path.join(savePath, file)
            # copy first, then rename the copy in place
            shutil.copy2(filePath, tarFilePath)
            fileMd5 = self.file_md5(tarFilePath)
            os.rename(tarFilePath, os.path.join(savePath, fileMd5))

    def get_dis_duplicates(self, folder_path):
        """
        Return the paths of content-unique files under folder_path.

        The first file seen with a given MD5 wins; later duplicates are
        counted and reported.
        """
        file_hashes = {}
        res = []
        cnt = 0
        for file in tqdm(os.listdir(folder_path), desc="计算文件md5值"):
            file_path = os.path.join(folder_path, file)
            file_hash = self.file_md5(file_path)
            if file_hash not in file_hashes:
                res.append(file_path)
                file_hashes[file_hash] = file_path
            else:
                cnt += 1
        print("{}下共{}个重复文件".format(folder_path, cnt))
        return res

    def is_pe_file(self, file_path):
        """Return True if file_path parses as a PE image."""
        pe = None
        try:
            pe = pefile.PE(file_path)
            return True
        except pefile.PEFormatError:
            return False
        finally:
            # bug fix: pefile maps the file; close it so the handle/mapping
            # is not leaked on every call
            if pe is not None:
                pe.close()

    def is_elf_file(self, filename):
        """Return True if filename is an ELF executable (e_type == ET_EXEC)."""
        try:
            with open(filename, 'rb') as f:
                elffile = ELFFile(f)
                return elffile.header['e_type'] == 'ET_EXEC'
        except Exception:
            # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit propagate
            return False

    def judge_sample_type(self, originPath):
        """
        Classify every file under originPath by format.
        Returns {"pe": [paths], "elf": [paths], "els": [everything else]}.
        """
        res = {}
        pe = []
        elf = []
        els = []
        for name in tqdm(os.listdir(originPath), desc="判断文件类型"):
            samplePath = os.path.join(originPath, name)
            if self.is_pe_file(samplePath):
                pe.append(samplePath)
            elif self.is_elf_file(samplePath):
                elf.append(samplePath)
            else:
                els.append(samplePath)
        res["pe"] = pe
        res["elf"] = elf
        res["els"] = els
        return res

    def copy_samples(self, tarPath, samples):
        """Copy every path in *samples* into tarPath (created if missing)."""
        if not os.path.exists(tarPath):
            os.makedirs(tarPath)
            print(f"文件夹 '{tarPath}' 创建成功")
        for sample in tqdm(samples, desc="移动文件中"):
            shutil.copy(sample, tarPath)

    def move_by_file_type(self, originPath, tarPath):
        """Copy files from originPath into per-type sub-folders of tarPath."""
        res = self.judge_sample_type(originPath)
        for key in res:
            self.copy_samples(os.path.join(tarPath, key), res[key])

    def move_by_dis_dyplicates(self, originPath, tarPath):
        """Copy only content-unique files from originPath into tarPath."""
        res = self.get_dis_duplicates(originPath)
        self.copy_samples(tarPath, res)

    def _shelve_complete(self, tarPath, stem):
        """Return True when all three shelve parts (.bak/.dat/.dir) exist for *stem*."""
        return all(
            os.path.exists(os.path.join(tarPath, stem + ext))
            for ext in ('.bak', '.dat', '.dir')
        )

    def shelve_file_made_train_and_test(self, tarPath, trainPath, testPath, limit, limit1):
        """
        Split shelve-stored samples between two folders: the first `limit`
        complete stems go to trainPath, further stems (up to `limit1` total)
        go to testPath. A stem counts only when all three shelve files
        (.bak/.dat/.dir) are present.
        """
        cnt = 0
        train = {}
        test = {}
        res1, res2 = [], []
        for file in os.listdir(tarPath):
            stem = file.split(".")[0]
            if cnt < limit:
                # each stem appears once per extension; dict guards double-count
                if stem not in train and self._shelve_complete(tarPath, stem):
                    train[stem] = True
                    cnt += 1
            elif stem not in test and stem not in train and cnt < limit1:
                if self._shelve_complete(tarPath, stem):
                    test[stem] = True
                    cnt += 1
        print("移动到train文件夹中：{}个".format(len(train)))
        for tar in train:
            for ext in ('.bak', '.dat', '.dir'):
                res1.append(os.path.join(tarPath, tar + ext))
        # bug fix: was `filePro.copy_samples(...)` — a global defined only when
        # the module runs as __main__; use self so the method works anywhere
        self.copy_samples(trainPath, res1)
        print("移动到test文件夹中：{}个".format(len(test)))
        for tar in test:
            for ext in ('.bak', '.dat', '.dir'):
                res2.append(os.path.join(tarPath, tar + ext))
        self.copy_samples(testPath, res2)

class dataProcess:
    """
    PE feature-extraction pipeline:
    - extract raw features from PE bytes and turn them into fixed-size vectors
    - read back previously saved feature vectors
    - walk a folder of PE samples and dump their feature JSON lines
    """
    def __init__(self):
        self.emberDataPath = config["dataConfig"]["ember2018Path"]
        self.pe_extractor = PEFeatureExtractor()
        self.fileProcecss = FileProcess()

    def parse_pe2features(self, bytez):
        """
        Extract raw features from PE bytes.
        The concrete feature selection is fixed inside PEFeatureExtractor.
        """
        return self.pe_extractor.raw_features(bytez)

    def parse_pe2embedding(self, bytez):
        """Convert PE bytes into a fixed-dimension feature vector."""
        return self.pe_extractor.process_raw_features(self.parse_pe2features(bytez))

    def gene_ectorized_form(self):
        """
        Vectorize the sample features stored in the ember dataset and save
        them together with the metadata. Must run to completion in one go,
        otherwise the result is corrupted.
        """
        a = time.time()
        print("正在处理样本，务必一次处理完成，否则结果出错")
        create_vectorized_features(self.emberDataPath)
        create_metadata(self.emberDataPath)
        b = time.time()
        print("ember数据集处理耗时{}".format(b - a))

    def read_data(self):
        """
        Load the saved feature vectors and metadata and return them as
        (X_train, y_train, X_test, y_test, metadata_dataframe).
        (Previously the values were loaded and silently discarded.)
        """
        X_train, y_train, X_test, y_test = read_vectorized_features(self.emberDataPath)
        metadata_dataframe = read_metadata(self.emberDataPath)
        return X_train, y_train, X_test, y_test, metadata_dataframe

    # @profile
    def parsePefromFile(self, samplePath, savePath, name, label):
        """
        Extract the feature JSON for every PE sample in a folder.

        samplePath: root folder of the sample dataset
        savePath:   folder where intermediate files are written
        name:       dataset (sub-folder) name
        label:      dataset label (1 = malicious, 0 = benign, -1 = unlabeled)
        """
        # ------------------------- intermediate files -------------------------
        tarPath = os.path.join(savePath, name)
        jsonSavePath = os.path.join(savePath, name + ".jsonl")
        originPath = os.path.join(samplePath, name)
        logPath = os.path.join(tarPath, "finishedSample.txt")
        if not os.path.exists(tarPath):
            os.makedirs(tarPath)
            print(f"中间文件夹 '{tarPath}' 创建成功")
        # `a` maps already-processed sample paths -> True (resume support)
        a = {}
        if os.path.exists(logPath):
            with open(logPath, "r") as file:
                for line in file:
                    a[line.strip()] = True
        else:
            # create an empty log file so later appends always have a target
            with open(logPath, mode='w', encoding='utf-8') as ff:
                print("新建日志文件{}".format(logPath))
        print("共{}个样本已被处理".format(len(a)))
        # ------------------------- sample validity check ----------------------
        files = []
        notPeCount = 0
        for filename in tqdm(os.listdir(originPath), desc="样本合法性校验"):
            filepath = os.path.join(originPath, filename)
            if filepath not in a and os.path.isfile(filepath):
                if not self.fileProcecss.is_pe_file(filepath):
                    print("{}：不是pe文件，已忽略".format(filepath))
                    notPeCount += 1
                else:
                    files.append(filepath)
        print("{}个非pe格式样本".format(notPeCount))
        print("还需处理{}个样本".format(len(files)))
        # ------------------------- feature extraction -------------------------
        # bug fixes: the loop variable used to shadow the `samplePath` parameter,
        # the sample file handle was never closed, and "rb+" needlessly required
        # write permission for a read-only pass
        for filePath in tqdm(files, desc="样本特征提取"):
            try:
                with open(filePath, "rb") as f:
                    tar = f.read()
                res = self.parse_pe2features(tar)
                res["label"] = label
                with open(jsonSavePath, 'a+') as json_file:
                    json.dump(res, json_file)
                    json_file.write("\n")
            except Exception as e:
                print("样本路径{}：".format(filePath))
                print("错误信息：{}".format(e))
            # the sample is logged even on failure so reruns skip it (matches
            # the original best-effort behavior)
            with open(logPath, "a+") as file:
                file.write(filePath + "\n")

    # @profile
    def gene_ectorized_form_by_path(self, samplePath, saveName="test"):
        """
        Read sample JSON lines from a folder, vectorize them and save the result.

        samplePath: folder holding the intermediate .jsonl files
        saveName:   base name for the saved vector files (X_<name>.dat / y_<name>.dat)
        """
        raw_feature_paths = []
        for filename in os.listdir(samplePath):
            tar = filename.split(".")
            # only "<name>.jsonl" with exactly one dot is picked up
            if len(tar) == 2 and tar[1] == "jsonl":
                raw_feature_paths.append(os.path.join(samplePath, filename))
        extractor = PEFeatureExtractor()
        print("Vectorizing~~")
        X_path = os.path.join(samplePath, "X_{}.dat".format(saveName))
        y_path = os.path.join(samplePath, "y_{}.dat".format(saveName))
        # bug fix: count rows with context managers instead of leaking one
        # open file handle per jsonl inside a comprehension
        nrows = 0
        for fp in raw_feature_paths:
            with open(fp) as fh:
                nrows += sum(1 for _ in fh)
        # NOTE(review): `debugLog` is not defined in this file — presumably it
        # comes from `from utils import *`; confirm
        if debugLog == "true":
            print("处理的json目录为：{}".format(raw_feature_paths))
            print("总的处理格式为:{}\n".format(nrows))
        vectorize_subset(X_path, y_path, raw_feature_paths, extractor, nrows)
    

if __name__=="__main__":

    # Instantiate the helpers and run the ember vectorization pipeline.
    filePro =FileProcess()
    dataPro = dataProcess()
    dataPro.gene_ectorized_form()

    # filePro.move_by_file_type("/home/cyw/projects/malware_detected/originData/origin_customer_malware/malware","/home/cyw/projects/malware_detected/originData/origin_customer_malware/test")
    # train
    #     benign: 300
    #     malicious: 300
    # test
    #     benign: 300
    #     malicious: 300

    # tarPath = r"/home/cyw/projects/malware_detected/saveData/graphData/malware2000"
    # trainPath = r"/home/cyw/projects/malware_detected/saveData/graphData/train_and_test/train_final/benign"
    # testPath = r"/home/cyw/projects/malware_detected/saveData/graphData/train_and_test/test_final/benign"

    # # # tarPath = r"/home/cyw/projects/malware_detected/saveData/graphData/malware1000"
    # # # trainPath = r"/home/cyw/projects/malware_detected/saveData/graphData/train_and_test/train/malware"
    # # # testPath = r"/home/cyw/projects/malware_detected/saveData/graphData/train_and_test/test/malware"
    # filePro.shelve_file_made_train_and_test(tarPath, trainPath, testPath, 300,600)



    # tarPath = "/home/cyw/projects/malware_detected/originData/origin_normal_samples/benign"
    # desPath ="/home/cyw/projects/malware_detected/originData/origin_normal_samples/benign_md5"
    # filePro.rename_by_md5(tarPath,desPath)

    # dataPro = dataProcess()
    # # Generate the ember feature vectors (~800k samples)
    # # TODO: unify the later generation steps — the flow is too messy right now
    # # takes roughly 5 minutes
    # dataPro.gene_ectorized_form()



    # TODO
    # Clean up the code; stop generating intermediate files inside originData
    # Support train/test splitting; generate intermediate files per split
    # Template-method style pipeline to unify data processing; add redundancy checks for the easy steps
    # Decouple preprocessing from the other modules (so it is usable standalone)

    # Goal: resumable execution; one-click run by switching config files
    # Users should only need to edit the config file and drop samples into originData.
    # When a batch of samples arrives, split them automatically by file type; the config decides which type to use,
    # then preprocess per folder to produce the intermediate files, and split train/test according to the config