"""
  Copyright (c) 2024 Northeastern University (China)
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
"""
# 任务：这里做最终的目标源码重用组件识别
# 识别算法：
# A：目标代码和组件逐个对比，当重叠部分大于10%的时候，再判断组件的每个文件中和目标的重叠部分，最后达到阈值的文件被重用则认为正确识别到
# B.目标代码和组件逐个对比，先判断目标代码是否几乎被包含在组件中，如果不是在判断目标代码整体能不能达到组件的10%，如果可以按照A判断，如果不可以，则认为将组件规模缩小到
# 目标代码的规模，仅仅判断目标代码所涉及到的文件是不是有达到阈值被重用

import math
import multiprocessing
import os
import re
import subprocess

import tlsh
from tqdm import tqdm
from src.common_utils.logger import logger
from src.common_utils import configure
from src.common_utils.utils_file import write_append_jl, write_line_jl, read_file_with_encoding_lines
from collections import Counter, defaultdict

from src.operate_db.Component import Component as Component
from src.operate_db.test.HarmonyProjects import HarmonyProjects
from src.operate_db.RepoFunction_db import RepoFunction
from src.operate_db.VerWeight import VerWeight

# Directory containing the current file.
g_cur_dir = os.path.dirname(os.path.abspath(__file__))
# Parent directory of the current file.
father_path = os.path.dirname(g_cur_dir)
# Path to the ctags executable, taken from the project configuration.
ctagsPath		= configure.CONFIG['ctags_path']
# Detection reports are written under <this dir>/data_out.
resultPath=os.path.join(g_cur_dir,"data_out")
theta=0.1            # Centris filtering threshold: 10% feature overlap
class Detector:
    """Identify which OSS components (and versions) a target C/C++ source
    tree reuses, by comparing TLSH function hashes against a component DB."""

    # NOTE(review): these are *class* attributes, shared by every Detector
    # instance; each DB accessor is created once at class-definition time.
    # Confirm this sharing is intentional (worker processes each get their
    # own copy after fork/spawn).
    component = Component()
    repoFunction=RepoFunction()
    verWeight = VerWeight()
    componentDB={}

    def __init__(self,iner_file_theta,file_theta):
        """Store the two reuse thresholds.

        @param iner_file_theta: minimum fraction of a file's functions that
            must match before the file counts as reused
        @param file_theta: minimum fraction of files that must be reused
            before the whole component counts as reused
        """
        self.iner_file_theta =iner_file_theta  # new criterion: intra-file reuse threshold
        self.file_theta = file_theta # new criterion: intra-OSS (file-level) reuse threshold
    def get_all_component_name(self):
        """Return the name of every component in the DB.

        Results are ordered ascending by 'func_feature_cnt' so that small
        components are compared before large ones.
        """
        cursor = (self.component.collection
                  .find({}, {'sig_full_name': 1, 'func_feature_cnt': 1})
                  .sort('func_feature_cnt', 1))
        return [doc['sig_full_name'] for doc in cursor]
    def get_ver_funccnt(self,ver_weight,sig_full_name,ver_common):
        """For each candidate version tag, pair its weight with its coverage.

        @param ver_weight: tag -> accumulated weight of matched functions
        @param sig_full_name: component name; DB keys are "<name>@@<tag>"
        @param ver_common: tag -> number of exactly matched functions
        @return: tag -> (weight, matched/total ratio).  May be empty.

        Bug fix: the original only returned inside ``if len(res)!=0`` and
        fell through to an implicit ``None`` otherwise, which crashed the
        caller (``sorted(None.items())``).  Always return the mapping.
        """
        querylist=[sig_full_name+"@@"+tag for tag in ver_weight ]
        tag_funccnt=defaultdict(tuple)
        res=self.repoFunction.get_data({"sig_full_tag_name":  {"$in": querylist}},{'func_cnt':1,'sig_full_tag_name':1})
        for item in res:
            parts = item['sig_full_tag_name'].split("@@")
            if len(parts) > 1:
                tag_name = parts[1]
                # coverage = functions matched / total functions in that version
                tag_funccnt[tag_name]=(ver_weight[tag_name],float(ver_common[tag_name]/item['func_cnt']))
        return tag_funccnt



    def split_list(self, list, n):
        """Split *list* into up to *n* chunks and yield them one by one,
        for handing out to *n* worker processes.

        @param list: full list to split (name kept for interface
            compatibility even though it shadows the builtin)
        @param n: number of chunks / workers
        @return: generator of sub-lists

        Bug fix: an empty input made ``step_count`` 0 and
        ``range(0, 0, 0)`` raised ``ValueError``; now it simply yields
        nothing.  The manual end-clamp was dropped because slicing already
        clamps past the end of the list.
        """
        if not list:
            return
        step_count = math.ceil(len(list) / n)
        for i in range(0, len(list), step_count):
            yield list[i:i + step_count]
    def get_ver_repo(self,tag,sig_full_name):
        """Fetch the function-hash map ('tag_func') stored for one specific
        version *tag* of component *sig_full_name*."""
        query = {"sig_full_tag_name": sig_full_name + "@@" + tag}
        record = self.repoFunction.get_one_tagfunc(query, {})[0]
        return record['tag_func']


    def removeComment(self,string):
        """Strip C/C++ style comments (and curly-brace runs) from *string*,
        keeping string literals intact.

        Imported from VUDDY and ReDeBug.
        ref: https://github.com/squizz617/vuddy
        """
        c_regex = re.compile(
            r'(?P<comment>//.*?$|[{}]+)|(?P<multilinecomment>/\*.*?\*/)|(?P<noncomment>\'(\\.|[^\\\'])*\'|"(\\.|[^\\"])*"|.[^/\'"]*)',
            re.DOTALL | re.MULTILINE)
        # Keep only the non-comment fragments, in order of appearance.
        fragments = (m.group('noncomment') for m in c_regex.finditer(string))
        return ''.join(frag for frag in fragments if frag)

    # Generate TLSH
    def computeTlsh(self,string):
        """Return the TLSH fuzzy hash of *string* (forcehash also hashes
        inputs shorter than the normal TLSH minimum)."""
        encoded = string.encode()
        return tlsh.forcehash(encoded)

    def normalize(self,string):
        """Normalize a function body for hashing: remove LF, CR, TAB,
        curly braces and spaces, then lowercase everything.

        ref: https://github.com/squizz617/vuddy
        """
        for unwanted in ('\n', '\r', '\t', '{', '}'):
            string = string.replace(unwanted, '')
        # Splitting on ' ' and re-joining removes every space character.
        return ''.join(string.split(' ')).lower()

    def hashing(self,repoPath):
        """Hash every C/C++ function under *repoPath* with TLSH.

        Only ".c", ".cc" and ".cpp" files are considered.  ctags locates the
        function boundaries; each body is comment-stripped, normalized and
        hashed.

        @param repoPath: root directory of the target source tree
        @return: (resDict, fileCnt, funcCnt, lineCnt) where resDict maps
                 function hash -> [(relative path, start line, end line)]

        Fixes: the ctags invocation now passes an argument list with the
        default ``shell=False`` instead of a string with ``shell=True``, so
        paths containing quotes/spaces/shell metacharacters can neither break
        the command nor be interpreted by a shell.  Loop-invariant regexes
        are compiled once, and the builtin-shadowing ``dir`` was renamed.
        """
        print(f"正在哈希目标项目")
        possible = (".c", ".cc", ".cpp")

        fileCnt = 0
        funcCnt = 0
        lineCnt = 0

        resDict = {}

        # Compiled once: these do not depend on the file being processed.
        func = re.compile(r'(function)')
        number = re.compile(r'(\d+)')
        funcSearch = re.compile(r'{([\S\s]*)}')

        for path, dirs, files in os.walk(repoPath):
            for file in files:
                filePath = os.path.join(path, file)

                if not file.endswith(possible):
                    continue
                try:
                    # Execute ctags; argument-list form avoids shell quoting
                    # issues and injection via the file path.
                    functionList = subprocess.check_output(
                        [ctagsPath, '-f', '-', '--kinds-C=*', '--fields=neKSt', filePath],
                        stderr=subprocess.STDOUT).decode()
                    # For parsing functions
                    lines = read_file_with_encoding_lines(filePath)
                    allFuncs = str(functionList).split('\n')

                    fileCnt += 1

                    for entry in allFuncs:
                        elemList = re.sub(r'[\t\s ]{2,}', '', entry).split('\t')

                        # A ctags "function" line has at least 8 fields.
                        if entry != '' and len(elemList) >= 8 and func.fullmatch(elemList[3]):
                            funcStartLine = int(number.search(elemList[4]).group(0))
                            funcEndLine = int(number.search(elemList[7]).group(0))

                            tmpString = ''.join(lines[funcStartLine - 1: funcEndLine])

                            bodyMatch = funcSearch.search(tmpString)
                            funcBody = bodyMatch.group(1) if bodyMatch else " "

                            funcBody = self.removeComment(funcBody)
                            funcBody = self.normalize(funcBody)
                            funcHash = self.computeTlsh(funcBody)

                            # Strip the "T1" version prefix; skip unhashable bodies.
                            if len(funcHash) == 72 and funcHash.startswith("T1"):
                                funcHash = funcHash[2:]
                            elif funcHash == "TNULL" or funcHash == "" or funcHash == "NULL":
                                continue

                            storedPath = os.path.relpath(filePath,repoPath)
                            resDict.setdefault(funcHash, []).append((storedPath,funcStartLine,funcEndLine))

                            lineCnt += len(lines)
                            funcCnt += 1

                except subprocess.CalledProcessError as e:
                    print("Parser Error:", e)
                    continue
                except Exception as e:
                    print("Subprocess failed", e)
                    continue

        return resDict, fileCnt, funcCnt, lineCnt

    def readComponentDB(self):
        """Load every component's feature record from the DB, keyed by its
        'sig_full_name'."""
        logger.info("[+] read componentDB")
        componentdb = {}
        for record in self.component.get_all_features({}, {}):
            componentdb[record['sig_full_name']] = record
        print(f"共计读取{len(componentdb)}个组件")
        return componentdb
    def path_weight_func_count(self,inputDict,predictedVer_repo,func_feature):
        """
        For the target code: which files of this component version were
        reused, and how many functions in each.
        Note: the component here has already been through code splitting --
        only the functions unique to this OSS (its representative features)
        are counted.
        @param inputDict: target-code hashes (hash -> [(path, start, end)])
        @param predictedVer_repo: function hashes of the predicted component version
        @param func_feature: the component's representative feature hashes
        @return: path_weight,used,unused,modified,strChange,used_func,report_path
        path_weight: how many functions the target code used under each path
        """
        # Count, per component path, how many functions the target reused.
        path_weight=defaultdict(int)
        # Start analysing (possibly modified) reuse.
        used = 0  # number of functions copied verbatim
        unused = 0  # number of functions not used at all
        modified = 0  # number of similar (modified) functions
        strChange = False  # whether any reused function moved to a different path
        used_func=[]
        report_path=defaultdict(list)
        for ohash in predictedVer_repo:
            flag = 0
            if ohash in func_feature:
                if ohash in inputDict:
                    used += 1
                    used_func.append(ohash)
                    for storePath,funcStartLine,funcEndLine in inputDict[ohash]:
                        report_path[str(storePath)].append([funcStartLine,funcEndLine,list(predictedVer_repo[ohash])[0]])

                    nflag = 0
                    for opath in predictedVer_repo[ohash]:
                        for tpath in inputDict[ohash]:
                            if opath in tpath:  # the target path contains the original path, so no structural change
                                nflag = 1
                    if nflag == 0:
                        strChange = True

                    flag = 1

                else:  # the function was not reused verbatim
                    for thash in inputDict:  # compare against every target-code function
                        score = tlsh.diffxlen(ohash, thash)
                        if int(score) <= 30:
                            modified += 1
                            used_func.append(thash)
                            for storePath, funcStartLine, funcEndLine in inputDict[thash]:
                                report_path[str(storePath)].append([funcStartLine, funcEndLine, list(predictedVer_repo[ohash])[0]])

                            nflag = 0
                            for opath in predictedVer_repo[ohash]:
                                for tpath in inputDict[thash]:
                                    if opath in tpath:
                                        nflag = 1
                            if nflag == 0:
                                strChange = True

                            flag = 1

                            break  # TODO: assumes at most one target function matches.
                if flag == 0:
                    unused += 1
                else:
                    # Tally how many functions were reused under each path.
                    for path in predictedVer_repo[ohash]:
                        path_weight[path] += 1
        return path_weight,used,unused,modified,strChange,used_func,report_path

    def path_all_func_count(self, predictedVer_repo,func_feature,theta=1.0):
        """Count, per file path, how many representative-feature functions
        one version of a component contains.

        @param predictedVer_repo: hash -> paths for the predicted version
        @param func_feature: the component's representative feature hashes
        @param theta: optional scale factor applied to every count
        @return: path -> (scaled) function count
        """
        path_all = defaultdict(int)
        for func_hash, paths in predictedVer_repo.items():
            if func_hash not in func_feature:
                continue
            # A hash can appear under several paths (the paths were stored
            # as a collection per function), hence the inner loop.
            for file_path in paths:
                path_all[file_path] += 1 * theta
        return path_all
    def calculate_func_reuse_ratio(self,path_weight,path_all):
        """Compute, per file, the fraction of its functions that were reused,
        keeping only files above the intra-file threshold.

        @param path_weight: path -> number of functions the target reused
        @param path_all: path -> total number of functions in that file
        @return: path -> reuse ratio, for ratios above self.iner_file_theta
        """
        file_reuse_ratio = defaultdict(float)
        for file_path, used_cnt in path_weight.items():
            share = used_cnt / path_all[file_path]
            if share > self.iner_file_theta:
                file_reuse_ratio[file_path] = share
        return file_reuse_ratio
    def Determine_version(self,commonFunc,inputDict,metadata,name):
        """Pick the most likely reused version of component *name*.

        Each matched function votes (with a per-function weight) for every
        version tag it appears in; the tag with the highest (weight,
        coverage, name) tuple wins.

        @param commonFunc: hashes common to the target and this component
        @param inputDict: target-code hashes (hash -> [(path, start, end)])
        @param metadata: the component's feature record
        @param name: component sig_full_name
        @return: (predictedVer, ver_weight, reuse_path); predictedVer is
                 None when no version data could be retrieved -- callers
                 already check ``predictedVer is not None``.

        Bug fix: when get_ver_funccnt yields nothing (no DB rows),
        ``sortedByWeight[0]`` raised IndexError/TypeError; now we return
        None instead, matching what the callers expect.
        """
        weightFunc = self.verWeight.get_one_weight({'sig_full_name': name}, {'weight': 1})[0]['weight']
        ver_weight = defaultdict(int)  # per-version accumulated vote weight
        ver_common = defaultdict(int)  # per-version count of identical functions
        reuse_path = []  # target-code locations where reuse happened
        for hashFunction in commonFunc:
            reuse_path.extend(inputDict[hashFunction])
            tags = metadata['func_feature'][hashFunction]
            for tag in tags:
                ver_weight[tag] += weightFunc[hashFunction]
                ver_common[tag] += 1
        tag_funccnt = self.get_ver_funccnt(ver_weight, name, ver_common)
        if not tag_funccnt:
            # No version information available: let the caller skip this OSS.
            return None, ver_weight, reuse_path
        sortedByWeight = sorted(tag_funccnt.items(), key=lambda x: (x[1][0], x[1][1], x[0]), reverse=True)
        print(f"权重:{sortedByWeight}")
        predictedVer = sortedByWeight[0][0]  # the detected reused version
        return predictedVer,ver_weight,reuse_path

    def target_path_all(self,inputDuict):
        """Count how many functions the target code has under each file path.

        @param inputDuict: target-code hashes (hash -> [(path, start, end)]);
            parameter name kept (typo and all) for interface compatibility
        @return: path -> function count
        """
        path_all=defaultdict(int)
        for locations in inputDuict.values():
            for file_path, _start, _end in locations:
                path_all[file_path] += 1
        return path_all


    def detector(self, args):
        """
        Plan B: core component-identification routine.

        Adds a check for the target code being (almost) a proper subset of
        the component; targets with too little code are left unhandled.

        *args* is one tuple because multiprocessing.Pool.map passes a single
        argument; it unpacks to:
        @param idx: worker process number
        @param listarr: component names this worker must compare
        @param inputDictO: target-code hashes (hash -> [(path, start, end)])
        @param final_res: shared list collecting final identifications
        @param mid_res: shared list collecting intermediate results
        @param already_func: shared list of hashes already attributed to some OSS
        @param lock: shared lock guarding already_func
        @return: final_res,mid_res
        """
        idx,listarr,inputDictO,final_res,mid_res,already_func,lock=args
        inputDict = inputDictO.copy()
        targetpath_all = self.target_path_all(inputDict)  # every target path and its function count
        for name in tqdm(listarr, total=len(listarr),desc=f"process {idx} component对比进度"):
            OSS=name
            metadata=self.component.get_one_feature({'sig_full_name':name},{})[0]
            commonFunc = []
            print(f"在对比：{name},规模：{metadata['func_feature_cnt']}")
            totOSSFuncs = float(metadata['func_feature_cnt']/metadata['ver_cnt']) # average features per version: the amount actually compared
            if totOSSFuncs == 0.0:
                continue
            comOSSFuncs = 0.0
            target_path_weight=defaultdict(int)
            with lock:
                # Drop hashes another worker already attributed to a component.
                for hash in already_func:
                    if hash in inputDict:
                        del inputDict[hash]
            print(f'注意，删除特征之后遗留：{len(inputDict)}')
            if len(inputDict)==0:
                break
            # Collect the functions this component shares with the target.
            for hashval in metadata['func_feature']:
                if hashval in inputDict:
                    commonFunc.append(hashval)
                    comOSSFuncs += 1.0
                    for path,funcStartLine,funcEndLine in inputDict[hashval]:
                        target_path_weight[path]+=1
            if comOSSFuncs==0.0:
                continue
            print(f'公共函数：{comOSSFuncs}')
            print(comOSSFuncs / totOSSFuncs)
            # Subset check: what fraction of the *target's* files are covered
            # by this component.
            file_reuse_ratio = self.calculate_func_reuse_ratio(target_path_weight, targetpath_all)
            ratio = float(len(file_reuse_ratio) / len(targetpath_all))
            if ratio > self.file_theta:
                mid_data = {
                    'name': OSS,
                    'target_path_weight': target_path_weight,
                    'targetpath_all': targetpath_all,
                    'file_reuse_ratio': file_reuse_ratio,
                    'oss_ratio': ratio
                }
                mid_res.append(mid_data)
                predictedVer, ver_weight, reuse_path = self.Determine_version(commonFunc, inputDict, metadata, name)
                if predictedVer is not None:
                    predictedVer_repo = self.get_ver_repo(predictedVer, OSS)
                    path_weightreuse, used, unused, modified, strChange ,used_func,report_path= self. path_weight_func_count(inputDict,
                                                                                                      predictedVer_repo,
                                                                                                      metadata['func_feature'])
                    with lock:
                        # Record every attributed hash so other workers skip it.
                        used_func.extend(commonFunc)
                        for hash in used_func:
                            if hash not in already_func:
                                already_func.append(hash)
                    print(f"{idx}小于阈值确定发生重用：{OSS}")
                    reuse_path = dict(Counter(reuse_path))
                    #sort_reuse_path = dict(sorted(reuse_path.items(), key=lambda item: item[1], reverse=True))
                    sort_reuse_path = dict(sorted(report_path.items(), key=lambda x: len(x[1]), reverse=True))
                    report_data = {
                        'name': OSS,
                        'url': metadata['oss_url'],
                        'version': predictedVer,
                        'path': sort_reuse_path,
                        'origin_func_cnt': metadata['sig_info_cnt'],
                        'feature_func_cnt': metadata['func_feature_cnt'],
                        'used_same': str(used),
                        'used_modified': str(modified),
                        'used_strChange': str(strChange),
                        'unused': unused
                    }
                    final_res.append(report_data)
            elif totOSSFuncs*theta<len(inputDict):# the remaining target code is large enough to reach the component threshold
                if (comOSSFuncs / totOSSFuncs) >= theta:
                    print("目标代码量足够，且达到一重阈值: " + OSS + '\n')
                    predictedVer,ver_weight,reuse_path=self.Determine_version(commonFunc,inputDict,metadata,name)
                    if predictedVer is not None:
                        predictedVer_repo=self.get_ver_repo(predictedVer,OSS)
                        path_weight,used,unused,modified,strChange,used_func,report_path=self.path_weight_func_count(inputDict,predictedVer_repo,metadata['func_feature'])
                        path_all=self.path_all_func_count(predictedVer_repo,metadata['func_feature'])

                        # New criterion:
                        # intra-file reuse ratio: how many functions of each file are reused
                        file_reuse_ratio =self.calculate_func_reuse_ratio(path_weight,path_all)
                        # intra-OSS reuse ratio: how many of the component's files are reused
                        ratio = float(len(file_reuse_ratio) / len(path_all))

                        mid_data = {
                            'name': OSS,
                            'ver_weight': ver_weight,
                            'path_weight': path_weight,
                            'path_all': path_all,
                            'file_reuse_ratio': file_reuse_ratio,
                            'oss_ratio': ratio
                        }
                        mid_res.append(mid_data)
                        if ratio > self.file_theta:
                            print(f'common:{len(commonFunc)}')
                            print(f'usedfunc:{len(used_func)}')
                            with lock:
                                used_func.extend(commonFunc)
                                print(f'合并后：{len(used_func)}')
                                for hash in used_func:
                                    if hash not in already_func:
                                        already_func.append(hash)
                            print(f"{idx}确定发生重用：{OSS}")
                            reuse_path  = dict(Counter(reuse_path))
                            #sort_reuse_path=dict(sorted(reuse_path.items(), key=lambda item: item[1], reverse=True))
                            sort_reuse_path = dict(sorted(report_path.items(), key=lambda x: len(x[1]), reverse=True))
                            report_data = {
                                'name': OSS,
                                'url': metadata['oss_url'],
                                'version': predictedVer,
                                'path': sort_reuse_path,
                                'origin_func_cnt':metadata['sig_info_cnt'],
                                'feature_func_cnt':metadata['func_feature_cnt'],
                                'used_same': str(used),
                                'used_modified': str(modified),
                                'used_strChange': str(strChange),
                                'unused': unused
                            }
                            final_res.append(report_data)
            # else:  # the target's total size cannot even reach the component threshold
            #     if (comOSSFuncs / len(inputDict)) >= theta:
            #         print("目标小于OSS，且达到一重阈值: " + OSS + '\n')
            #         predictedVer, ver_weight, reuse_path=self.Determine_version(commonFunc,inputDict,metadata,name)
            #         predictedVer_repo=self.get_ver_repo(predictedVer,OSS)
            #         path_weight,used,unused,modified,strChange=self.path_weight_func_count(inputDict,predictedVer_repo,metadata['func_feature'])
            #         path_all=self. path_all_func_count(predictedVer_repo,metadata['func_feature'],float(len(inputDict)/totOSSFuncs))
            #
            #         # New criterion:
            #         # intra-file reuse ratio
            #         file_reuse_ratio =self.calculate_func_reuse_ratio(path_weight,path_all)
            #         # intra-OSS reuse ratio
            #         ratio = float(len(file_reuse_ratio) / len(path_weight))
            #         mid_data = {
            #             'name': OSS,
            #             'ver_weight': ver_weight,
            #             'path_weight': path_weight,
            #             'path_all': path_all,
            #             'file_reuse_ratio': file_reuse_ratio,
            #             'oss_ratio': ratio
            #         }
            #         mid_res.append(mid_data)
            #         if ratio > self.file_theta:
            #             print(f"目标代码规模小，确定发生重用：{OSS}")
            #             reuse_path  = dict(Counter(reuse_path))
            #             report_data = {
            #                 'name': OSS,
            #                 'url': metadata['oss_url'],
            #                 'version': predictedVer,
            #                 'path': reuse_path,
            #                 'origin_func_cnt':metadata['sig_info_cnt'],
            #                 'feature_func_cnt':metadata['func_feature_cnt'],
            #                 'used_same': str(used),
            #                 'used_modified': str(modified),
            #                 'used_strChange': str(strChange),
            #                 'unused': unused
            #             }
            #             final_res.append(report_data)
        return final_res,mid_res

    # def detector(self, idx, listarr, inputDict, final_res, mid_res,already_func):
    #     """
    #     A方案，组件识别的核心函数
    #     @param idx: 进程编号
    #     @param listarr: 需要对比的组件列表
    #     @param inputDict: 目标代码哈希
    #     @param final_res: 最终识别结果
    #     @param mid_res: 识别过程中间结果
    #     @return: final_res,mid_res
    #     """
    #     for name in tqdm(listarr, total=len(listarr), desc=f"process {idx} component对比进度"):
    #         OSS = name
    #         metadata = self.component.get_one_feature({'sig_full_name': name}, {})[0]
    #         commonFunc = []
    #         totOSSFuncs = float(metadata['func_feature_cnt'] / metadata['ver_cnt'])  # 这里换成实际对比的函数数量
    #         if totOSSFuncs == 0.0:
    #             continue
    #         comOSSFuncs = 0.0
    #         for hashval in metadata['func_feature']:
    #             if hashval in inputDict and hashval not in already_func:
    #                 commonFunc.append(hashval)
    #                 comOSSFuncs += 1.0
    #             elif hashval in already_func:
    #                 print(f"已经对比过")
    #
    #         if (comOSSFuncs / totOSSFuncs) >= theta:
    #             print("OSS: " + OSS + '\n')
    #             # 确定版本
    #             weightFunc = self.verWeight.get_one_weight({'sig_full_name': name}, {'weight': 1})[0]['weight']
    #             ver_weight = defaultdict(int)  # 重用了哪些版本，计数
    #             ver_common = defaultdict(int)  # 计数每个版本重用了几条一模一样的函数
    #             reuse_path = []  # 目标代码发生重用的路径
    #             for hashFunction in commonFunc:
    #                 reuse_path.extend(inputDict[hashFunction])
    #                 tags = metadata['func_feature'][hashFunction]
    #                 for tag in tags:
    #                     ver_weight[tag] += weightFunc[hashFunction]
    #                     ver_common[tag] += 1
    #             tag_funccnt = self.get_ver_funccnt(ver_weight, name, ver_common)
    #             sortedByWeight = sorted(tag_funccnt.items(), key=lambda x: (x[1][0], x[1][1], x[0]), reverse=True)
    #             print(f"权重:{sortedByWeight}")
    #             predictedVer = sortedByWeight[0][0]  # 这是检测出重用的版本
    #
    #             predictedVer_repo = self.get_ver_repo(predictedVer, OSS)
    #             path_weight, used, unused, modified, strChange = self.path_weight_func_count(inputDict,
    #                                                                                          predictedVer_repo,
    #                                                                                          metadata['func_feature'])
    #             path_all = self.path_all_func_count(predictedVer_repo, metadata['func_feature'])
    #
    #             # 新的标准:
    #             # 计算文件内部重用比例，即一个文件重用多少函数
    #             file_reuse_ratio =self.calculate_func_reuse_ratio(path_weight,path_all)
    #             # 计算oss内部重用比例，即重用了多少文件
    #             ratio = float(len(file_reuse_ratio) / len(path_all))
    #
    #             mid_data = {
    #                 'name': OSS,
    #                 'ver_weight': ver_weight,
    #                 'path_weight': path_weight,
    #                 'path_all': path_all,
    #                 'file_reuse_ratio': file_reuse_ratio,
    #                 'oss_ratio': ratio
    #             }
    #             mid_res.append(mid_data)
    #             if ratio > self.file_theta:
    #                 already_func.extend(commonFunc)
    #                 print(f"确定发生重用：{OSS}")
    #                 reuse_path  = dict(Counter(reuse_path))
    #                 report_data = {
    #                     'name': OSS,
    #                     'url': metadata['oss_url'],
    #                     'version': predictedVer,
    #                     'path': reuse_path,
    #                     'origin_func_cnt':metadata['sig_info_cnt'],
    #                     'feature_func_cnt':metadata['func_feature_cnt'],
    #                     'used_same': str(used),
    #                     'used_modified': str(modified),
    #                     'used_strChange': str(strChange),
    #                     'unused': unused
    #                 }
    #                 final_res.append(report_data)
    #     return final_res,mid_res

    def detector_main(self,inputDict,numProcess):
        """Run component detection over all components with a process pool.

        @param inputDict: target-code hashes (hash -> [(path, start, end)])
        @param numProcess: number of worker processes
        @return: (final results sorted by exact-reuse count descending,
                 middle results), or (None, None) when nothing was detected

        Bug fix: results were sorted by ``x['used_same']``, which is stored
        as a *string* -- lexicographic order puts "9" ahead of "10".  The key
        now converts to int.  The unused ``processes`` list was removed.
        """
        logger.info("[+] start detector")
        manager = multiprocessing.Manager()
        final_res=manager.list()
        mid_res=manager.list()
        already_func=manager.list()
        lock= manager.Lock()
        all_component_name=self.get_all_component_name()
        with multiprocessing.Pool(processes=numProcess) as pool:
            # Fan the component list out to the pool; shared manager lists
            # and the lock coordinate the workers.
            pool.map(self.detector,
                     [(idx+1,list_arr,inputDict,final_res,mid_res,already_func,lock) for
                      idx, list_arr in enumerate(self.split_list(all_component_name, numProcess))])

        if len(final_res)!=0:
            # Sort descending so the most heavily reused components come first.
            sortfinalres=sorted(list(final_res), key=lambda x: int(x['used_same']), reverse=True)
            return sortfinalres,mid_res
        else:
            return None,None


if __name__ == "__main__":
    # Entry point: hash a target source tree, detect reused components,
    # and write the reports as .jl files under resultPath.
    detector=Detector(0.7,0.6)
    target_src_path = input("target_src_path:")
    # basename/normpath handles the platform's separators and trailing
    # slashes; the previous split('\\')[-1] only worked for Windows paths.
    target_src_repo = os.path.basename(os.path.normpath(target_src_path))
    resDict, fileCnt, funcCnt, lineCnt = detector.hashing(target_src_path)
    if len(resDict)!=0:
        final_res,mid_res=detector.detector_main(resDict,16)
        if final_res is not None:
            # Make sure the output directory exists before writing reports.
            os.makedirs(resultPath, exist_ok=True)
            write_line_jl(os.path.join(resultPath, target_src_repo + ".jl"), final_res)
            write_line_jl(os.path.join(resultPath, target_src_repo + "middata.jl"), mid_res)
    else:
        print("目标代码非C文件，不做检测")

