"""
  Copyright (c) 2024 Northeastern University (China)
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
"""
# Preprocess the OSS repositories already downloaded locally: extract the
# functions of each version, hash the functions, build a version index,
# and finally generate the initial signature database.

import io
import multiprocessing
import os
import subprocess
import re
import math
import sys
import traceback
from collections import defaultdict
from datetime import datetime
from urllib.parse import urlparse

import click
import tlsh
from tqdm import tqdm

from src.common_utils import configure
from src.common_utils.utils_file import compress_dict, write_append_jl, read_jl, read_file_with_encoding_lines
from src.operate_db.InitialSig import InitialSig
from src.operate_db.RepoFunction_db import RepoFunction
from src.operate_db.TagInfo_db import TagInfo

"""GLOBALS"""
# 当前文件的路径
g_cur_dir = os.path.dirname(os.path.abspath(__file__))
# 当前文件的父路径
father_path = os.path.dirname(g_cur_dir)
sys.path.append(father_path)
ctagsPath		= configure.CONFIG['ctags_path']

# So far, do not change
class Preprocess:
    """Build the initial signature database from locally cloned OSS repos.

    For every repository still unprocessed: check out its recent git tags,
    extract C/C++ functions with ctags, normalize and TLSH-hash each function
    body, and persist per-tag and per-repo signature dicts (compressed, with a
    GridFS fallback for oversized documents) plus a tag/time index.
    """

    tagInfo=TagInfo()
    repoFunction=RepoFunction()
    initialSig=InitialSig()
    # alloss_path = g_cur_dir + r'\data_out\cloned_oss.jl'  # names and URLs of all OSS
    # success_oss_path = g_cur_dir + r'\data_out\success_extract_oss.jl'  # names and URLs of OSS processed successfully
    alloss_path = os.path.join(configure.CONFIG['ossdb_construct'],'cloned_oss.jl')  # names and URLs of all cloned OSS projects
    success_oss_path = os.path.join(configure.CONFIG['ossdb_construct'],'success_extract_oss.jl')  # OSS already processed successfully
    down_path = configure.CONFIG['dataset_repo']  # where the source code is stored
    # error_oss_path = g_cur_dir + r'\data_out\error_extract_oss.jl'  # OSS whose signature generation failed
    error_oss_path = os.path.join(configure.CONFIG['ossdb_construct'],'error_extract_oss.jl') # OSS whose signature generation failed
    error_extract_oss = []
    exist_oss = []
    process_oss = []

    def __init__(self):
        # Work list = all cloned OSS minus those recorded as already processed.
        all_oss = read_jl(self.alloss_path)
        self.error_extract_oss = read_jl(self.error_oss_path)
        self.exist_oss = read_jl(self.success_oss_path)
        self.process_oss = [item for item in all_oss if item not in self.exist_oss]
    def computeTlsh(self,string):
        """Return the TLSH digest of *string* (forcehash works even on short input)."""
        string = str.encode(string)
        hs = tlsh.forcehash(string)
        return hs

    def removeComment(self,string):
        # Code for removing C/C++ style comments. (Imported from VUDDY and ReDeBug.)
        # ref: https://github.com/squizz617/vuddy
        c_regex = re.compile(
            r'(?P<comment>//.*?$|[{}]+)|(?P<multilinecomment>/\*.*?\*/)|(?P<noncomment>\'(\\.|[^\\\'])*\'|"(\\.|[^\\"])*"|.[^/\'"]*)',
            re.DOTALL | re.MULTILINE)
        return ''.join([c.group('noncomment') for c in c_regex.finditer(string) if c.group('noncomment')])

    def normalize(self,string):
        # Code for normalizing the input string.
        # LF and TAB literals, curly braces, and spaces are removed,
        # and all characters are lowercased.
        # ref: https://github.com/squizz617/vuddy
        return ''.join(
            string.replace('\n', '').replace('\r', '').replace('\t', '').replace('{', '').replace('}', '').split(
                ' ')).lower()

    def clean_url(self,urlstr):
        """Canonicalize a repository URL: collapse duplicate slashes, strip
        whitespace, trailing "/" and a trailing ".git", then lowercase."""
        # Remove redundant slashes inside the URL (but not the "//" after the scheme)
        clean_url = re.sub(r"(?<!:)/{2,}", "/", urlstr)
        clean_url = clean_url.strip()  # strip surrounding whitespace
        # Round-trip through urlparse to normalize the URL
        parsed_url = urlparse(clean_url)
        clean_url = parsed_url.geturl()
        # Drop a redundant trailing slash
        if clean_url.endswith("/"):
            clean_url = clean_url.rstrip("/")
        if clean_url.endswith(".git"):
            clean_url = clean_url[:-4]  # slice off the trailing ".git"
        return clean_url.lower()  # mainstream hosts are assumed case-insensitive
    def store_to_tagInfo(self,tag_time,sig_full_name):
        """Persist a repo's tag list into the TagInfo DB.

        :param tag_time: list of (tag_name, tag_time) tuples, as built by
            generate_signatures
        :param sig_full_name: the repo's full signature name
        """
        self.tagInfo.add_one(
            {
                'sig_full_name': sig_full_name,
                'tag_time': tag_time
            }
        )
    def store_to_initialSig(self,oss,func_dict,ver_cnt):
        """Compress the repo-wide {func_hash: tags} dict and store it in the
        InitialSig DB, falling back to GridFS when the compressed payload is
        too large for a single document."""
        oss_url = self.clean_url(oss['oss_url'])
        repo_name = oss_url.split('/')[-1].replace(".git", "")
        repo_full_name = oss['repo_full_name']
        repo_path = oss['repo_path']
        src_local_path = os.path.relpath(repo_path, configure.CONFIG['local_data_path'])
        if len(func_dict) != 0:
            func_dict_zip = compress_dict(func_dict)
            threshold = 15800000  # 15.8 MB — presumably to stay under MongoDB's 16 MB BSON limit (TODO confirm)
            bson_size = len(func_dict_zip)
            if bson_size <= threshold:
                data = {
                    'sig_full_name': repo_full_name,
                    'repo_name': repo_name,
                    'oss_name': oss['oss_name'],
                    'oss_source': oss['source'],
                    'oss_url': oss_url,
                    'src_local_path': src_local_path,
                    'sig_info': func_dict_zip,
                    'sig_info_cnt': len(func_dict),
                    'ver_cnt': ver_cnt
                }
            else:
                # Payload too large: store the blob via GridFS and keep only its id
                # json_data = json.dumps(func_dict)
                sig_info_id = self.initialSig.use_Gridfs_store(io.BytesIO(func_dict_zip), repo_full_name + ".gz")
                data = {
                    'sig_full_name': repo_full_name,
                    'repo_name': repo_name,
                    'oss_name': oss['oss_name'],
                    'oss_source': oss['source'],
                    'oss_url': oss_url,
                    'src_local_path': src_local_path,
                    'sig_info': sig_info_id,
                    'sig_info_cnt': len(func_dict),
                    'ver_cnt': ver_cnt

                }
            self.initialSig.add_one(data)
    def store_to_Func_Src(self,oss,func_src_dict):
        """Compress and store the {func_hash: sources} dict per repo.

        NOTE(review): ``self.function_Src`` is not defined anywhere in this
        class, so calling this method would raise AttributeError. Its only
        call site (in generate_repo_signature) is commented out, so this is
        currently dead code — wire up a Function_Src DB handle before use.
        """
        repo_full_name = oss['repo_full_name']
        repo_path = oss['repo_path']
        src_local_path = os.path.relpath(repo_path, configure.CONFIG['local_data_path'])
        if len(func_src_dict) != 0:
            func_dict_zip = compress_dict(func_src_dict)
            threshold = 15800000  # 15.8 MB — presumably to stay under MongoDB's 16 MB BSON limit (TODO confirm)
            bson_size = len(func_dict_zip)
            if bson_size <= threshold:
                data = {
                    'sig_full_name': repo_full_name,
                    'src_local_path': src_local_path,
                    'func_src': func_dict_zip
                }
            else:
                # Payload too large: store the blob via GridFS and keep only its id
                # json_data = json.dumps(func_dict)
                sig_info_id = self.function_Src.use_Gridfs_store(io.BytesIO(func_dict_zip), repo_full_name + ".gz")
                data = {
                    'sig_full_name': repo_full_name,
                    'src_local_path': src_local_path,
                    'func_src': sig_info_id
                }
            self.function_Src.add_one(data)
    def store_to_RepoFunc(self,resDict,sig_full_tag_name,fileCnt,funcCnt):
        """Compress and store one tag's {func_hash: relative paths} dict in the
        RepoFunction DB, with the same GridFS fallback as the other stores."""
        if len(resDict) != 0:
            func_dict_zip = compress_dict(resDict)
            threshold = 15800000  # 15.8 MB — presumably to stay under MongoDB's 16 MB BSON limit (TODO confirm)
            bson_size = len(func_dict_zip)
            if bson_size <= threshold:
                data={
                        'sig_full_tag_name': sig_full_tag_name,
                        'tag_func': func_dict_zip,
                        'file_cnt': fileCnt,
                        'func_cnt': funcCnt
                    }
            else:
                # Payload too large: store the blob via GridFS and keep only its id
                # json_data = json.dumps(func_dict)
                sig_info_id = self.repoFunction.use_Gridfs_store(io.BytesIO(func_dict_zip), sig_full_tag_name + ".gz")
                data = {
                    'sig_full_tag_name': sig_full_tag_name,
                    'tag_func': sig_info_id,
                    'file_cnt': fileCnt,
                    'func_cnt': funcCnt
                }
            self.repoFunction.add_data(data)



    def parse_files_with_tag(self, repoPath, func_dict, func_src_dict,tag, time):
        """
        Generate the signature for one tag: walk the checked-out tree,
        extract every C/C++ function with ctags, normalize and hash it.

        :param repoPath: local path of the OSS currently being processed
        :param func_dict: {func_hash: set(tags)} across all versions of this
            OSS; mutated in place, so new functions are recorded even if the
            return value is discarded
        :param func_src_dict: {func_hash: set(normalized sources)}; also
            mutated in place
        :param tag: name of the tag (version) being processed
        :param time: release time of this tag (currently unused in the body)
        :return: func_dict, fileCnt, funcCnt, func_src_dict, lineCnt, resDict
            where resDict = {func_hash: set(relative file paths)} for this
            single version, and fileCnt/funcCnt/lineCnt are per-version
            counters for files, functions and lines
        """
        # This function is for hashing C/C++ functions
        # Only consider ".c", ".cc", and ".cpp" files
        possible = (".c", ".cc", ".cpp")

        fileCnt = 0
        funcCnt = 0
        lineCnt = 0
        resDict=defaultdict(set)
        for path, dir, files in os.walk(repoPath):
            for file in files:
                filePath = os.path.join(path, file)  # full path of each file, fed to ctags below

                if file.endswith(possible):
                    try:
                        # Execute ctags command
                        # NOTE(review): shell=True with an interpolated file path is
                        # shell-injection-prone if a path contains metacharacters;
                        # prefer an argument list with shell=False.
                        functionList = subprocess.check_output(
                            ctagsPath + ' -f - --kinds-C=* --fields=neKSt "' + filePath + '"', stderr=subprocess.STDOUT,
                            shell=True).decode()
                        # For parsing functions
                        lines = read_file_with_encoding_lines(filePath)
                        allFuncs = str(functionList).split('\n')
                        func = re.compile(r'(function)')
                        number = re.compile(r'(\d+)')
                        funcSearch = re.compile(r'{([\S\s]*)}')
                        tmpString = ""
                        funcBody = ""

                        fileCnt += 1

                        for i in allFuncs:  # one ctags record per line
                            elemList = re.sub(r'[\t\s ]{2,}', '', i)
                            elemList = elemList.split('\t')
                            funcBody = ""

                            # Only keep records tagged as functions with start/end line fields
                            if i != '' and len(elemList) >= 8 and func.fullmatch(elemList[3]):
                                funcStartLine = int(number.search(elemList[4]).group(0))
                                funcEndLine = int(number.search(elemList[7]).group(0))

                                tmpString = ""
                                tmpString = tmpString.join(lines[funcStartLine - 1: funcEndLine])

                                # Keep only the outermost brace-enclosed body
                                if funcSearch.search(tmpString):
                                    funcBody = funcBody + funcSearch.search(tmpString).group(1)
                                else:
                                    funcBody = " "

                                funcBody = self.removeComment(funcBody)
                                funcBody = self.normalize(funcBody)
                                funcsrc = funcBody  # normalized source (comments already stripped)
                                funcHash = self.computeTlsh(funcBody)  # TLSH hash of the function body

                                # Strip the "T1" version prefix from a full 72-char digest;
                                # skip functions TLSH could not hash
                                if len(funcHash) == 72 and funcHash.startswith("T1"):
                                    funcHash = funcHash[2:]
                                elif funcHash == "TNULL" or funcHash == "" or funcHash == "NULL":
                                    continue
                                rel_path = filePath.replace(repoPath, "")  # file path relative to the repo root
                                func_dict[funcHash].add(tag)
                                func_src_dict[funcHash].add(funcsrc)
                                resDict[funcHash].add(rel_path)
                                lineCnt += len(lines)
                                funcCnt += 1

                    except subprocess.CalledProcessError as e:
                        print("parse_files_with_tag Parser Error:", e)
                        continue
                    except Exception as e:
                        print("parse_files_with_tag Subprocess failed", e)
                        continue
        return func_dict, fileCnt, funcCnt, func_src_dict,lineCnt,resDict

    def calculate_years_since(self,commit_time):
        """Return the fractional number of years between *commit_time* and now.

        NOTE(review): compares against naive ``datetime.utcnow()``, so
        *commit_time* must be a naive datetime in the same (UTC) frame —
        TODO confirm. Returns None if the subtraction fails.
        """
        try:
            current_time = datetime.utcnow()
            time_difference = current_time - commit_time
            total_seconds = time_difference.total_seconds()
            years_since = total_seconds / (365.2425 * 24 * 60 * 60)
            return years_since
        except Exception as e:
            print(str(e))

    def calculate_months_between_tags(self,tag1_time, tag2_time):
        """Return the number of months between two "%Y-%m-%d %H:%M:%S" strings.

        Note: tag1_time is the later timestamp, the one closer to now.
        Returns None if parsing fails.
        """
        try:
            tag1_time = datetime.strptime(tag1_time, "%Y-%m-%d %H:%M:%S")
            tag2_time = datetime.strptime(tag2_time, "%Y-%m-%d %H:%M:%S")
            time_difference = tag1_time - tag2_time
            total_days = time_difference.total_seconds() / (24 * 60 * 60)
            months_between_tags = total_days / 30.44  # Average days in a month
            return months_between_tags
        except Exception as e:
            print(str(e))

    def get_recent_tags(self, tag_time, oss_url):
        """Select the recent tags worth signing.

        Starts with a ~4-year window and widens it one year at a time (up to
        10 extra years) until more than one tag is collected; the result is
        capped at 100 tags. For salsa.debian.org repos, only tags starting
        with "upstream" are considered once any such tag exists.

        :param tag_time: {tag_name: "%Y-%m-%d %H:%M:%S" release time}
        :param oss_url: remote URL, used to detect salsa.debian.org repos
        :return: list of selected tag names
        """
        print(f"寻找近期版本:tag_tima:{len(tag_time)}")
        recent_tags = []
        flag = False
        if len(tag_time) <= 1:
            for tag, time in tag_time.items():
                recent_tags.append(tag)
        else:
            year = 0.0
            if "salsa.debian.org" in oss_url:
                for tag, time in tag_time.items():
                    if tag.startswith("upstream"):
                        flag = True
                        break
            while len(recent_tags) <= 1:  # no versions in the current window, or too few — widen it
                if year>=10.0:
                    break
                for tag, time in tag_time.items():
                    time = datetime.strptime(time, "%Y-%m-%d %H:%M:%S")
                    year_since = self.calculate_years_since(time)
                    if year_since < 4.0 + year and tag not in recent_tags:
                        if flag == True:
                            if tag.startswith("upstream"):
                                recent_tags.append(tag)
                        else:
                            recent_tags.append(tag)
                year += 1.0
                # the loop above widens the time window until enough tags are collected
        if len(recent_tags) > 100:
            return recent_tags[0:100]
        else:
            return recent_tags
    def generate_signatures(self, repo_path,_idx,oss_url):
        """
        Core signature-generation routine for a single local repository.

        @param repo_path: local repository path
        @param _idx: worker process index (for log messages)
        @param oss_url: remote url corresponding to this local repository
        @return: func_dict, func_src_dict, ver_cnt, tag_date — or implicitly
            None when an unexpected error occurs (NOTE(review): callers unpack
            the result directly, so a None return raises TypeError there).
        """
        # Run git tag etc., check out each version, and extract function signatures
        try:
            func_dict = defaultdict(set)
            func_src_dict=defaultdict(set)
            ver_cnt=0
            tag_date = []
            tag_command = "git tag"
            tag_result = subprocess.check_output(
                # tag_command, stderr=subprocess.STDOUT, cwd=repo_path,shell=True
                tag_command, stderr=subprocess.DEVNULL, cwd=repo_path, shell=True
            ).decode()

            # One line per tag: "<iso date> (tag: name, ...)"
            data_command = 'git log --tags --simplify-by-decoration --pretty="format:%ai %d"'
            data_result = subprocess.check_output(
                # data_command, stderr=subprocess.STDOUT,cwd=repo_path, shell=True
                data_command, stderr=subprocess.DEVNULL, cwd=repo_path, shell=True
            ).decode()

            tag_time = {}
            for tag_info in data_result.split('\n'):
                m = re.match(r".*tag: (?P<tag_name>.*)[),]", tag_info)
                if m:
                    tag_time[
                        m.groupdict()['tag_name'].split(',')[0]
                    ] = " ".join(tag_info.split()[:2])
            # If there are tags, check out and process each one
            if tag_result == "" :
                # No tags, only master repo
                # For such repos use the latest commit as both version and date
                master_command = 'git log --oneline --simplify-by-decoration --max-count=1 --pretty="format:%h %ai %d"'
                master_result = subprocess.check_output(
                    # master_command, stderr=subprocess.STDOUT, cwd=repo_path,shell=True
                    master_command, stderr=subprocess.DEVNULL , cwd=repo_path, shell=True
                ).decode()
                m = re.match(r"(?P<commit_id>\w+)\s+(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [-+]\d{4}).*",
                             master_result)
                if m:
                    commit_id = m.groupdict()['commit_id']
                    date_time = m.groupdict()['date']
                    func_dict, fileCnt, funcCnt, func_src_dict,lineCnt,resDict= self.parse_files_with_tag(repo_path, func_dict,func_src_dict, "master_" + commit_id,date_time)
                    if funcCnt != 0:
                        ver_cnt += 1
                        tag_date=[("master_" + commit_id,date_time)]
                        self.store_to_RepoFunc(resDict, os.path.basename(repo_path)+"@@"+"master_" + commit_id, fileCnt, funcCnt)
            else:
                recent_tags = self.get_recent_tags(tag_time, oss_url)
                for tag in recent_tags:  # check out and process each recent tag
                    # Generate function hashes for each tag (version)
                    if tag == '':
                        continue
                    checkoutCommand = "git checkout -f " + tag
                    try:
                        subprocess.check_output(checkoutCommand, stderr=subprocess.STDOUT,cwd=repo_path, shell=True)
                        func_dict, fileCnt, funcCnt, func_src_dict, lineCnt, resDict= self.parse_files_with_tag(repo_path, func_dict,func_src_dict, tag,
                                                                                       tag_time[tag])
                        if funcCnt!=0:
                            ver_cnt += 1
                            tag_date.append((tag,tag_time[tag]))
                            self.store_to_RepoFunc(resDict, os.path.basename(repo_path)+"@@"+tag, fileCnt, funcCnt)
                    except Exception as e:
                        # A failing checkout/parse for one tag should not abort the repo
                        traceback.print_exc()
                        continue
            return func_dict,func_src_dict,ver_cnt,tag_date
        except Exception as e:
            print(f"thread{_idx} 给仓库{repo_path}生成签名：\ngenerate_signatures error :{repo_path}")
            traceback.print_exc()




    def generate_repo_signature(self,_idx,list_arr):
        """
        Per-process worker: generate and persist signatures for every local
        repository in this worker's share of the work list.

        @param _idx: worker process index
        @param list_arr: list of OSS entries assigned to this worker
        @return: None — updates the databases and the success/error .jl files
        """
        for oss in  tqdm(list_arr,total=len(list_arr), desc=f"Thread{_idx} repo签名生成进度", unit="个repo"):# progress over this worker's repos
            try:
                oss_url=self.clean_url(oss['oss_url'])
                repo_full_name=oss['repo_full_name']
                repo_path = oss['repo_path']
                # Skip repos that already have a TagInfo record (signature already generated)
                # NOTE(review): generate_signatures returns None on internal failure,
                # so this unpack can raise TypeError — caught by the broad except
                # below, which then files the repo under errors.
                if len(self.tagInfo.get({'sig_full_name':repo_full_name},{"_id":1}))==0 and os.path.exists(repo_path):
                    func_dict,func_src_dict,ver_cnt,tag_date=self.generate_signatures(repo_path,_idx,oss_url)
                    if len(func_dict)!=0:
                        self.store_to_initialSig(oss,func_dict,ver_cnt)
                    if len(func_src_dict)!=0:
                        #self.store_to_Func_Src(oss,func_src_dict)
                        pass
                    if len(tag_date)!=0:
                        self.store_to_tagInfo(tag_date,repo_full_name)
                if oss not in self.exist_oss:
                    self.exist_oss.append(oss)
                    write_append_jl(self.success_oss_path,oss)
            except Exception as e:
                if oss not in self.error_extract_oss:
                    write_append_jl(self.error_oss_path,oss)
                print("clone_extract_signatures error:", e)
                traceback.print_exc()
                continue

    def split_list(self, list, n):
        """
        Split the work list into n nearly-equal chunks, one per worker.

        :param list: the full work list (NOTE(review): parameter name shadows
            the builtin ``list``)
        :param n: number of chunks, i.e. number of worker processes
        :return: yields successive slices of the input list
        """
        step_count = math.ceil(len(list) / n)
        for i in range(0, len(list), step_count):
            end = i + step_count
            if end > len(list):
                end = len(list)
            yield list[i:end]

    def main(self,processList=None,num_processes=1):
        """Fan the work list out across *num_processes* worker processes.

        :param processList: explicit work list; defaults to self.process_oss
        :param num_processes: number of worker processes to spawn
        """
        if processList is None:
            processList=self.process_oss
        if len(processList)==0:
            print("没有需要处理的仓库")
        else:
            # NOTE(review): reports len(self.process_oss), not len(processList) —
            # misleading when an explicit processList is passed in.
            print(f"共计要处理:{len(self.process_oss)}")
            processes = []
            for idx, list_arr in enumerate(self.split_list(processList, num_processes)):  # split the task list and number each worker
                process = multiprocessing.Process(target=self.generate_repo_signature, args=(idx + 1, list_arr))
                processes.append(process)
            for process in processes:
                process.start()
            for process in processes:
                process.join()  # wait for every worker before the main process continues
@click.command()
@click.option('--cpu', type=int, default=8, help='并发执行数')
def main(cpu):
    """CLI entry point: build the initial signature DB with *cpu* worker processes."""
    Preprocess().main(None, cpu)


if __name__ == "__main__":
    main()