"""
  Copyright (c) 2024 Northeastern University (China)
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
"""
# NOTE: requires py-tlsh 4.5.0, which only works correctly on Python 3.9.
# Task: scan HarmonyOS projects with the legacy CENTRIS detector to find components that may be reused.
# Input: a folder path containing HarmonyOS source-code projects (harmony_source_repos).
# Output: 1. alloss.json, stored next to this file, recording all dependency components of all
#            projects, with duplicates removed. Each record in alloss.json has the form {name: ...}.
#         2. One scan-result file per HarmonyOS project under harmony_repos_deps; this exists for
#            later testing and can be dropped in production use.

import math
import multiprocessing
import os
import re
import json
import threading
import tlsh
import subprocess
from src.common_utils.logger import logger
from src.common_utils import configure
from src.common_utils.utils_file import read_jl_list, read_jl
from tqdm import tqdm

# Absolute directory containing this file.
g_cur_dir = os.path.dirname(os.path.abspath(__file__))
# Parent directory of this file's directory.
father_path = os.path.dirname(g_cur_dir)


"""GLOBALS"""
currentPath		= os.getcwd()
# Detection threshold: a component is reported when at least this fraction
# of its functions is found in the scanned project (see detector()).
theta			= 0.1
# Directory holding the per-OSS function-hash signature files.
finalDBPath		= configure.CONFIG['centris'] + "\\componentDB\\"
# JSON file mapping repo name -> average function count (see getAveFuncs()).
aveFuncPath		= configure.CONFIG['centris'] + "\\aveFuncs"
# Path to the ctags executable used to parse C/C++ functions.
ctagsPath		= configure.CONFIG['ctags_path']
# JSON-lines output file accumulating every identified OSS component.
alloss_Path     =g_cur_dir+r'\data_out\centris_oss.jl'
# URLs of components already identified (deduplication across projects).
exit_ossdep     =[]
componentDB = {}  # global component signature database, loaded lazily by readComponentDB()




def read_alloss(alloss_path):
    """
    Load previously identified component dependencies into the global
    ``exit_ossdep`` list.

    :param alloss_path: path to a JSON-lines file whose lines are dicts
                        containing at least a ``url`` key
    :return: an empty list when the file does not exist (legacy return value;
             callers rely on the side effect of populating ``exit_ossdep``)
    """
    if not os.path.exists(alloss_path):
        return []
    # The file is written with UTF-8 (see detector()), so it must be read
    # with UTF-8 too — the previous default-encoding read broke on Windows.
    with open(alloss_path, 'r', encoding='utf-8') as json_file:
        for line in json_file:
            if not line.strip():
                continue  # tolerate blank lines instead of failing json.loads
            try:
                item = json.loads(line)
                exit_ossdep.append(item['url'])
            except Exception as e:
                print(f"❌ json load error, line={line}, msg={str(e)}")
# Generate TLSH
def computeTlsh(string):
    """Return the TLSH fuzzy hash of *string*, forcing a hash even for short input."""
    encoded = string.encode()
    return tlsh.forcehash(encoded)


def removeComment(string):
    """
    Strip C/C++ style comments from *string* while keeping string literals intact.

    Imported from VUDDY and ReDeBug; ref: https://github.com/squizz617/vuddy
    """
    c_regex = re.compile(
        r'(?P<comment>//.*?$|[{}]+)|(?P<multilinecomment>/\*.*?\*/)|(?P<noncomment>\'(\\.|[^\\\'])*\'|"(\\.|[^\\"])*"|.[^/\'"]*)',
        re.DOTALL | re.MULTILINE)
    pieces = (m.group('noncomment') for m in c_regex.finditer(string))
    return ''.join(piece for piece in pieces if piece)

def normalize(string):
    """
    Normalize a function body for hashing: remove LF/CR/TAB literals, curly
    braces and spaces, then lowercase everything.

    ref: https://github.com/squizz617/vuddy
    """
    stripped = string.translate(str.maketrans('', '', '\n\r\t{} '))
    return stripped.lower()

def hashing(repoPath):
    """
    TLSH-hash every C/C++ function found under *repoPath*.

    Only ".c", ".cc" and ".cpp" files are considered. Functions are located
    with ctags, their bodies are stripped of comments, normalized, and hashed.

    :param repoPath: root directory of the repository to scan
    :return: tuple (resDict, fileCnt, funcCnt, lineCnt) where resDict maps
             each function hash to the list of repo-relative file paths
             containing a function with that hash
    """
    possible = (".c", ".cc", ".cpp")

    fileCnt  = 0
    funcCnt  = 0
    lineCnt  = 0

    resDict  = {}

    # Hoisted loop invariants: compile the helper regexes once per scan
    # instead of once per file.
    func       = re.compile(r'(function)')
    number     = re.compile(r'(\d+)')
    funcSearch = re.compile(r'{([\S\s]*)}')

    for path, dirs, files in os.walk(repoPath):
        for file in files:
            filePath = os.path.join(path, file)

            if file.endswith(possible):
                try:
                    # Execute the ctags command to enumerate definitions.
                    # NOTE(review): the command is built by string concatenation
                    # with shell=True, so a file path containing quotes could
                    # break or inject into the shell command — consider
                    # subprocess.check_output([...], shell=False).
                    functionList = subprocess.check_output(
                        ctagsPath + ' -f - --kinds-C=* --fields=neKSt "' + filePath + '"',
                        stderr=subprocess.STDOUT, shell=True).decode()

                    # Use a context manager so the source file is always closed
                    # (the previous version leaked the file handle).
                    with open(filePath, 'r', encoding="UTF-8") as f:
                        lines = f.readlines()

                    allFuncs = str(functionList).split('\n')

                    fileCnt += 1

                    for i in allFuncs:
                        elemList = re.sub(r'[\t\s ]{2,}', '', i)
                        elemList = elemList.split('\t')

                        # A valid ctags "function" entry has at least 8 fields;
                        # fields 4 and 7 carry the start/end line numbers.
                        if i != '' and len(elemList) >= 8 and func.fullmatch(elemList[3]):
                            funcStartLine = int(number.search(elemList[4]).group(0))
                            funcEndLine   = int(number.search(elemList[7]).group(0))

                            tmpString = ''.join(lines[funcStartLine - 1:funcEndLine])

                            # Keep only the text between the outermost braces.
                            bodyMatch = funcSearch.search(tmpString)
                            funcBody  = bodyMatch.group(1) if bodyMatch else " "

                            funcBody = removeComment(funcBody)
                            funcBody = normalize(funcBody)
                            funcHash = computeTlsh(funcBody)

                            # Strip the "T1" version prefix of modern TLSH output.
                            if len(funcHash) == 72 and funcHash.startswith("T1"):
                                funcHash = funcHash[2:]
                            elif funcHash == "TNULL" or funcHash == "" or funcHash == "NULL":
                                continue  # body could not be hashed (e.g. too short)

                            storedPath = filePath.replace(repoPath, "")
                            if funcHash not in resDict:
                                resDict[funcHash] = []
                            resDict[funcHash].append(storedPath)

                            lineCnt += len(lines)
                            funcCnt += 1

                except subprocess.CalledProcessError as e:
                    print("Parser Error:", e)
                    logger.error('[+]  An error occurred', exc_info=True)
                    continue
                except Exception as e:
                    print ("Subprocess failed", e)
                    logger.error('[+]  An error occurred', exc_info=True)
                    continue

    return resDict, fileCnt, funcCnt, lineCnt

def getAveFuncs():
    """Load and return the {repo name: average function count} mapping from aveFuncPath."""
    with open(aveFuncPath, 'r', encoding="UTF-8") as fp:
        return json.load(fp)

def readComponentDB():
    """
    Populate the global ``componentDB`` from the signature files in finalDBPath.

    Each signature file holds a JSON list of {"hash": ...} entries; the result
    maps each OSS file name to the list of its function hashes.
    """
    print("读取组件库：")
    for OSS in os.listdir(finalDBPath):
        hashes = []
        componentDB[OSS] = hashes
        with open(finalDBPath + OSS, 'r', encoding="UTF-8") as fp:
            hashes.extend(entry["hash"] for entry in json.load(fp))
    return componentDB

def detector(inputDict, inputRepo, componentDB, resultPath):
    """
    Detect which OSS components from *componentDB* are reused by the input repo.

    A component is reported when the fraction of its functions found in
    *inputDict* reaches the global threshold ``theta``. Newly seen components
    are appended to the global ``exit_ossdep`` list and written as JSON lines
    to ``alloss_Path``.

    :param inputDict: mapping of function hash -> file paths, from hashing()
    :param inputRepo: name of the scanned repository (used in the result file name)
    :param componentDB: mapping of OSS signature name -> list of function hashes
    :param resultPath: directory in which the per-repo result file is created
    """
    # The per-repo result file is only created, not written (detailed
    # per-component output was disabled); keep the creation so downstream
    # tooling that expects the file to exist keeps working. The context
    # manager fixes the previous unclosed-on-exception file handle.
    with open(resultPath + "centris_" + inputRepo + ".json", 'w'):
        pass

    aveFuncs = getAveFuncs()

    for OSS in componentDB:
        repoName = OSS.split('_sig')[0]
        totOSSFuncs = float(aveFuncs[repoName])
        if totOSSFuncs == 0.0:
            continue  # avoid division by zero for components with no functions

        # Count how many of this component's function hashes appear in the input.
        comOSSFuncs = float(sum(1 for hashval in componentDB[OSS] if hashval in inputDict))

        if (comOSSFuncs / totOSSFuncs) >= theta:
            # Record each identified component once, keyed by its GitHub URL.
            OSS = "https://github.com/" + OSS.replace("@@", "/").replace("_sig", "")
            if OSS not in exit_ossdep:
                exit_ossdep.append(OSS)  # remember the OSS url globally
                oss_name = OSS.split("/")[-1]  # repository name is the last URL segment
                name_result = {"name": oss_name, "url": OSS, "source": "github"}
                with open(alloss_Path, 'a+', encoding='utf-8') as fp:
                    fp.write(json.dumps(name_result) + '\n')


def main(inputPath, inputRepo, componentDB, resultPath):
    """Hash all C/C++ functions of one project, then run component detection on them."""
    funcIndex, _fileCnt, _funcCnt, _lineCnt = hashing(inputPath)
    detector(funcIndex, inputRepo, componentDB, resultPath)

def batch_process(resultPath:str=None,fileList:list=None):
    """
    Sequentially scan a list of projects with the CENTRIS detector.

    :param resultPath: directory where each project's scan result file is written
    :param fileList: project directory paths to scan; when None, every project
                     under configure.CONFIG['harmony_src'] is scanned
    :return: None
    """
    # NOTE(review): this truncates the accumulated-components file, yet
    # read_alloss() below re-reads the very same (now empty) file, so the
    # read is always a no-op — confirm whether truncation is intended here.
    open(alloss_Path, 'w').close()
    logger.info(f"[+] detector by Centris")
    if fileList is None:  # no project list given: scan the HarmonyOS projects
        targetpath = configure.CONFIG['harmony_src']
        # Collect every project directory under the HarmonyOS source root.
        fileList = []
        for project in os.listdir(targetpath):
            fileList.append(''.join([targetpath, os.sep, project]))
    if resultPath is None:
        resultPath = configure.CONFIG['all_dep'] + "\\"
        shouldMake = [resultPath]
        for eachRepo in shouldMake:
            if not os.path.isdir(eachRepo):
                os.mkdir(eachRepo)
    global componentDB  # reference the module-level component database
    if len(componentDB)==0:
        # Load the component signature database on first use.
        print("加载组件数据库...")
        componentDB = readComponentDB()
    else:
        print("直接使用已加载的组件数据库...")
    # Load already-identified components (see NOTE above: the file was just cleared).
    read_alloss(alloss_Path)
    count=0
    for f in fileList:
        count=count+1
        print(f"{count}:处理项目{f}")
        if os.path.isdir(f):  # if the entry is a directory
            print("目标项目路径:", f)
        inputRepo = f.split('\\')[-1]
        main(f, inputRepo, componentDB,resultPath)
    print(f"共处理项目{count},识别组件{len(exit_ossdep)}个")
def split_list(list, n):
    """
    Split *list* into up to *n* roughly equal consecutive chunks (generator).

    :param list: the full list to split (NOTE: parameter name shadows the
                 builtin ``list``; kept for backward compatibility)
    :param n: desired number of chunks, i.e. the worker count
    :return: yields successive sublists covering the whole input
    """
    if not list:
        # An empty input would make step_count 0 and range() raise
        # "range() arg 3 must not be zero" — yield nothing instead.
        return
    step_count = math.ceil(len(list) / n)
    for i in range(0, len(list), step_count):
        # Slicing clamps automatically, so no explicit end check is needed.
        yield list[i:i + step_count]
def main_multi(idx, fileList, resultPath, componentDB):
    """Worker entry point: scan every repo in *fileList*, with a per-worker progress bar."""
    progress = tqdm(fileList, total=len(fileList), desc=f"thread{idx} centris扫描进度", unit="个repo")
    for repo_path in progress:
        repo_name = repo_path.split('\\')[-1]
        main(repo_path, repo_name, componentDB, resultPath)

def batch_process_multi(resultPath:str=None,fileList:list=None,num_processes:int=1):
    """
    Scan a list of projects in parallel using multiple worker processes.

    :param resultPath: directory where each project's scan result file is written
    :param fileList: project directory paths to scan; when None, every project
                     under configure.CONFIG['harmony_src'] is scanned
    :param num_processes: number of worker processes the file list is split across
    :return: None
    """
    # Truncate the accumulated-components output file before this run.
    open(alloss_Path, 'w').close()
    logger.info(f"[+] detector by Centris")
    if fileList is None:  # no project list given: scan the HarmonyOS projects
        targetpath = configure.CONFIG['harmony_src']
        # Collect every project directory under the HarmonyOS source root.
        fileList = []
        for project in os.listdir(targetpath):
            fileList.append(''.join([targetpath, os.sep, project]))
    if resultPath is None:
        resultPath = configure.CONFIG['all_dep'] + "\\"
        shouldMake = [resultPath]
        for eachRepo in shouldMake:
            if not os.path.isdir(eachRepo):
                os.mkdir(eachRepo)
    global componentDB  # reference the module-level component database
    if len(componentDB) == 0:
        # Load the component signature database on first use.
        print("加载组件数据库...")
        componentDB =readComponentDB()
    else:
        print("直接使用已加载的组件数据库...")
    processes = []
    # Split the work list into num_processes chunks; each chunk gets one process.
    # NOTE(review): componentDB is copied into each child process, so updates
    # workers make to module globals (e.g. exit_ossdep) do not propagate back
    # to this process — confirm whether that is acceptable.
    for idx, list_arr in enumerate(split_list(fileList, num_processes)):
        process = multiprocessing.Process(target=main_multi, args=(idx,list_arr,resultPath,componentDB))
        processes.append(process)
    for process in processes:
        process.start()
    for process in processes:
        process.join()  # wait for all workers before the main process continues


""" EXECUTE """
if __name__ == "__main__":
    # all_result_file = open(alloss_Path, 'a+')
    # inputPath=input("inputpath:")
    # componentDB = readComponentDB()
    # inputRepo = inputPath.split('\\')[-1]
    # print(inputRepo)
    # main(inputPath, inputRepo, componentDB,all_result_file)
    allprojects = read_jl(r"D:\wsj\open-harmony-project\src\oss_dataset_construction\data_out\this_time_cloned_projects.jl")
    batch_process_multi(fileList=allprojects,num_processes=10)

