"""
  Copyright (c) 2024 Northeastern University (China)
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
"""
# Task: update open-source packages that have already been downloaded locally,
#       i.e. add the signatures of newly released versions to the database.
# Steps: 1. Fetch the old records from the initialsig collection and the old tag
#           list from taginfo.
#        2. Locate the local repository, run git to list every version matching
#           the collection policy, find the versions not collected before, check
#           each one out, extract its signatures, and store that version's data
#           in the repofunction collection.
#        3. Merge the new functions into the old sig_info and write the merged
#           data back to the database.

import math
import multiprocessing
import os
import io
import re
import subprocess
import sys
import traceback
from datetime import datetime
from collections import defaultdict

from urllib.parse import urlparse

import click
import tlsh
import bson
from tqdm import tqdm

from src.common_utils import configure
from src.common_utils.utils_file import read_file_with_encoding_lines, compress_dict
from src.operate_db.InitialSig import InitialSig
from src.operate_db.RepoFunction_db import RepoFunction
from src.operate_db.TagInfo_db import TagInfo
# Path to the (universal-)ctags binary used to enumerate C/C++ functions.
ctagsPath		= configure.CONFIG['ctags_path']
# Directory containing this file.
g_cur_dir = os.path.dirname(os.path.abspath(__file__))
# Parent directory of this file.
father_path = os.path.dirname(g_cur_dir)
# Make the parent package importable when this script is run directly.
sys.path.append(father_path)
class UpdateOSS:
    """Update the signature database for locally cloned OSS repositories.

    Per repository: load the old signature (initialsig) and known tag list
    (taginfo), enumerate the repo's git tags, collect every tag that matches
    the collection policy but has not been processed yet (storing each new
    version into repofunction), then merge the new functions into the old
    sig_info and write the result back to the database.
    """

    # NOTE(review): these are *class-level* attributes, shared by all
    # instances (and copied into each worker process by multiprocessing).
    tagInfo = TagInfo() # version (tag) information for each OSS project
    repoFunction = RepoFunction() # per-version function details for each OSS project
    initialSig = InitialSig()  # initial signature collection
    oss_local_src_path = configure.CONFIG['local_data_path'] # root directory of the local OSS source trees

    def computeTlsh(self, string):
        """Return the TLSH fuzzy hash of *string*.

        ``forcehash`` hashes even very short inputs; it can still return the
        sentinel "TNULL", which the caller (parse_files_with_tag) skips.
        """
        string = str.encode(string)
        hs = tlsh.forcehash(string)
        return hs

    def removeComment(self, string):
        """Strip C/C++ comments from *string*, preserving string literals."""
        # Code for removing C/C++ style comments. (Imported from VUDDY and ReDeBug.)
        # ref: https://github.com/squizz617/vuddy
        # NOTE: the 'comment' alternative also swallows runs of curly braces
        # ([{}]+), so braces are dropped along with the comments.
        c_regex = re.compile(
            r'(?P<comment>//.*?$|[{}]+)|(?P<multilinecomment>/\*.*?\*/)|(?P<noncomment>\'(\\.|[^\\\'])*\'|"(\\.|[^\\"])*"|.[^/\'"]*)',
            re.DOTALL | re.MULTILINE)
        return ''.join([c.group('noncomment') for c in c_regex.finditer(string) if c.group('noncomment')])

    def normalize(self, string):
        """Return *string* with whitespace/braces removed and lowercased."""
        # Code for normalizing the input string.
        # LF and TAB literals, curly braces, and spaces are removed,
        # and all characters are lowercased.
        # ref: https://github.com/squizz617/vuddy
        return ''.join(
            string.replace('\n', '').replace('\r', '').replace('\t', '').replace('{', '').replace('}', '').split(
                ' ')).lower()

    def clean_url(self, urlstr):
        """Normalize a repository URL for comparison.

        Collapses duplicate slashes, strips whitespace, a trailing slash and a
        trailing ".git", then lowercases the result.
        """
        # Collapse runs of slashes that are not part of the scheme ("://").
        clean_url = re.sub(r"(?<!:)/{2,}", "/", urlstr)
        clean_url = clean_url.strip()  # strip surrounding whitespace
        # Round-trip through urlparse to canonicalize the URL.
        parsed_url = urlparse(clean_url)
        clean_url = parsed_url.geturl()
        # Drop a trailing slash, if any.
        if clean_url.endswith("/"):
            clean_url = clean_url.rstrip("/")
        if clean_url.endswith(".git"):
            clean_url = clean_url[:-4]  # slice off the trailing ".git"
        return clean_url.lower()  # mainstream hosts treat URLs case-insensitively

    def update_to_tagInfo(self,ossid, tag_time):
        """Overwrite the ``tag_time`` list of the taginfo document *ossid*."""
        self.tagInfo.update_one_with_id(ossid,
            {
                'tag_time': tag_time
            }
        )

    def update_to_initialSig(self, ossid,sig_full_name, func_dict, ver_cnt,fileid=None):
        """Write the merged signature back to the initialsig document *ossid*.

        Small payloads are embedded directly in the document; payloads above
        the threshold go to GridFS (deleting the previous GridFS file *fileid*
        first, when one exists).
        """
        if len(func_dict) != 0:
            func_dict_zip = compress_dict(func_dict)
            threshold = 15800000  # default 15.8 MB, just under MongoDB's 16 MB document limit
            bson_size = len(func_dict_zip)
            if bson_size <= threshold:
                data = {
                    'sig_info': func_dict_zip,
                    'sig_info_cnt': len(func_dict),
                    'ver_cnt': ver_cnt
                }
            else:
                # Too large to embed: store via GridFS instead.
                # print(f"使用gridfs存储方式")
                # json_data = json.dumps(func_dict)
                if fileid is not None:
                    print(f'删除原来的{fileid}')
                    self.initialSig.use_Gridfs_delete(fileid)
                sig_info_id = self.initialSig.use_Gridfs_store(io.BytesIO(func_dict_zip),  sig_full_name+ ".gz")
                data = {
                    'sig_info': sig_info_id,
                    'sig_info_cnt': len(func_dict),
                    'ver_cnt': ver_cnt

                }
            self.initialSig.update_one_with_id(ossid,data)
            print("更新完成")

    def store_to_Func_Src(self, oss, func_src_dict):
        """Persist the normalized function sources of one OSS project.

        NOTE(review): ``self.function_Src`` is never defined in this class, so
        this method would raise AttributeError as written; its only call site
        (in update_repo_signature) is commented out. Confirm where the
        attribute is supposed to come from before re-enabling it.
        """
        repo_full_name = oss['repo_full_name']
        repo_path = oss['repo_path']
        src_local_path = os.path.relpath(repo_path, configure.CONFIG['local_data_path'])
        if len(func_src_dict) != 0:
            func_dict_zip = compress_dict(func_src_dict)
            threshold = 15800000  # default 15.8 MB, just under MongoDB's 16 MB document limit
            bson_size = len(func_dict_zip)
            if bson_size <= threshold:
                data = {
                    'sig_full_name': repo_full_name,
                    'src_local_path': src_local_path,
                    'func_src': func_dict_zip
                }
            else:
                # Too large to embed: store via GridFS instead.
                # print(f"使用gridfs存储方式")
                # json_data = json.dumps(func_dict)
                sig_info_id = self.function_Src.use_Gridfs_store(io.BytesIO(func_dict_zip), repo_full_name + ".gz")
                data = {
                    'sig_full_name': repo_full_name,
                    'src_local_path': src_local_path,
                    'func_src': sig_info_id
                }
            self.function_Src.add_one(data)

    def store_to_RepoFunc(self, resDict, sig_full_tag_name, fileCnt, funcCnt):
        """Store one version's signature (keyed "name@@tag") into repofunction.

        Embeds the compressed dict when it fits under the threshold, otherwise
        stores it via GridFS and records the file id instead.
        """
        if len(resDict) != 0:
            func_dict_zip = compress_dict(resDict)
            threshold = 15800000  # default 15.8 MB, just under MongoDB's 16 MB document limit
            bson_size = len(func_dict_zip)
            if bson_size <= threshold:
                data = {
                    'sig_full_tag_name': sig_full_tag_name,
                    'tag_func': func_dict_zip,
                    'file_cnt': fileCnt,
                    'func_cnt': funcCnt
                }
            else:
                # Too large to embed: store via GridFS instead.
                # print(f"使用gridfs存储方式")
                # json_data = json.dumps(func_dict)
                sig_info_id = self.repoFunction.use_Gridfs_store(io.BytesIO(func_dict_zip), sig_full_tag_name + ".gz")
                data = {
                    'sig_full_tag_name': sig_full_tag_name,
                    'tag_func': sig_info_id,
                    'file_cnt': fileCnt,
                    'func_cnt': funcCnt
                }
            self.repoFunction.add_data(data)

    def parse_files_with_tag(self, repoPath, func_dict, func_src_dict, tag, time):
        """
        Generate the signature for one tag (version).

        Walks the checked-out tree, runs ctags on every C/C++ source file,
        extracts each function body, normalizes it and hashes it with TLSH.
        :param repoPath: local path of the OSS repository being processed
        :param func_dict: accumulated signatures across all versions of this
               OSS (mutated in place: hash -> set of tags)
        :param func_src_dict: accumulated normalized sources (hash -> set of bodies)
        :param tag: name of the tag (version) being processed
        :param time: release time of this tag (currently unused in the body)
        :return: func_dict, fileCnt, funcCnt, func_src_dict, lineCnt, resDict
        resDict is the signature of this single version (hash -> set of
        relative file paths); when the return value is ignored, the effect is
        simply that new functions were added to func_dict.
        fileCnt, funcCnt, lineCnt count files, functions and lines for this
        one version only.
        """
        # This function is for hashing C/C++ functions
        # Only consider ".c", ".cc", and ".cpp" files
        possible = (".c", ".cc", ".cpp")

        fileCnt = 0
        funcCnt = 0
        lineCnt = 0
        resDict = defaultdict(set)
        for path, dir, files in os.walk(repoPath):
            for file in files:
                filePath = os.path.join(path, file)  # full path of this file; handed to ctags below

                if file.endswith(possible):
                    try:
                        # Execute ctags command.
                        # NOTE(review): the file path is interpolated into a
                        # shell string; a path containing a double quote would
                        # break out of the quoting (shell-injection risk).
                        functionList = subprocess.check_output(
                            ctagsPath + ' -f - --kinds-C=* --fields=neKSt "' + filePath + '"', stderr=subprocess.STDOUT,
                            shell=True).decode()
                        # For parsing functions
                        lines = read_file_with_encoding_lines(filePath)
                        allFuncs = str(functionList).split('\n')
                        func = re.compile(r'(function)')
                        number = re.compile(r'(\d+)')
                        funcSearch = re.compile(r'{([\S\s]*)}')
                        tmpString = ""
                        funcBody = ""

                        fileCnt += 1

                        for i in allFuncs:  # process each ctags entry
                            elemList = re.sub(r'[\t\s ]{2,}', '', i)
                            elemList = elemList.split('\t')
                            funcBody = ""

                            # Keep only "function" entries that carry start/end line fields.
                            if i != '' and len(elemList) >= 8 and func.fullmatch(elemList[3]):
                                funcStartLine = int(number.search(elemList[4]).group(0))
                                funcEndLine = int(number.search(elemList[7]).group(0))

                                tmpString = ""
                                tmpString = tmpString.join(lines[funcStartLine - 1: funcEndLine])

                                # Extract the outermost brace-delimited body.
                                if funcSearch.search(tmpString):
                                    funcBody = funcBody + funcSearch.search(tmpString).group(1)
                                else:
                                    funcBody = " "

                                funcBody = self.removeComment(funcBody)
                                funcBody = self.normalize(funcBody)
                                funcsrc = funcBody  # normalized source of the function (comments already stripped)
                                funcHash = self.computeTlsh(funcBody)  # TLSH hash of the function body

                                # Strip the "T1" version prefix from a 72-char
                                # digest; skip bodies TLSH could not hash.
                                if len(funcHash) == 72 and funcHash.startswith("T1"):
                                    funcHash = funcHash[2:]
                                elif funcHash == "TNULL" or funcHash == "" or funcHash == "NULL":
                                    continue
                                rel_path = filePath.replace(repoPath, "")  # path relative to the repo root
                                # NOTE(review): if func_dict is a plain dict (as
                                # loaded from the DB) rather than a defaultdict,
                                # .add on a missing key raises KeyError, which
                                # the broad except below silently swallows for
                                # the rest of this file — verify the caller
                                # passes a defaultdict-like mapping.
                                func_dict[funcHash].add(tag)
                                func_src_dict[funcHash].add(funcsrc)
                                resDict[funcHash].add(rel_path)
                                lineCnt += len(lines)
                                funcCnt += 1

                    except subprocess.CalledProcessError as e:

                        traceback.print_exc()
                        continue
                    except Exception as e:
                        traceback.print_exc()
                        continue
        return func_dict, fileCnt, funcCnt, func_src_dict, lineCnt, resDict

    def calculate_years_since(self, commit_time):
        """Return the fractional number of years from *commit_time* to now.

        *commit_time* must be a naive datetime to match ``utcnow()``.
        Implicitly returns None if the computation fails (the exception is
        only printed).
        """
        try:
            current_time = datetime.utcnow()
            time_difference = current_time - commit_time
            total_seconds = time_difference.total_seconds()
            # 365.2425 days = mean Gregorian year.
            years_since = total_seconds / (365.2425 * 24 * 60 * 60)
            return years_since
        except Exception as e:
            print(str(e))

    def calculate_months_between_tags(self, tag1_time, tag2_time):
        """
        Return the fractional number of months between two tag timestamps.

        Note: *tag1_time* is the later timestamp, the one closer to now.
        Both arguments are "%Y-%m-%d %H:%M:%S" strings. Implicitly returns
        None on a parse error (the exception is only printed).
        """
        try:
            tag1_time = datetime.strptime(tag1_time, "%Y-%m-%d %H:%M:%S")
            tag2_time = datetime.strptime(tag2_time, "%Y-%m-%d %H:%M:%S")
            time_difference = tag1_time - tag2_time
            total_days = time_difference.total_seconds() / (24 * 60 * 60)
            months_between_tags = total_days / 30.44  # Average days in a month
            return months_between_tags
        except Exception as e:
            print(str(e))

    def get_recent_tags(self, tag_time, oss_url):
        """Select the tags to collect, according to the collection policy.

        Starts with tags released within the last 4 years and widens the
        window one year at a time (up to 10 extra years) until more than one
        tag qualifies.  For salsa.debian.org repositories only tags starting
        with "upstream" are kept (when any exist).  At most 100 tags are
        returned.
        """
        print(f"寻找近期版本:tag_tima:{len(tag_time)}")
        recent_tags = []
        flag = False
        if len(tag_time) <= 1:
            # Zero or one tag: take whatever there is.
            for tag, time in tag_time.items():
                recent_tags.append(tag)
        else:
            year = 0.0
            # salsa.debian.org repos: restrict collection to "upstream*" tags
            # if at least one such tag exists.
            if "salsa.debian.org" in oss_url:
                for tag, time in tag_time.items():
                    if tag.startswith("upstream"):
                        flag = True
                        break
            while len(recent_tags) <= 1:  # too few recent versions: widen the time window
                if year >= 10.0:
                    break
                for tag, time in tag_time.items():
                    time = datetime.strptime(time, "%Y-%m-%d %H:%M:%S")
                    year_since = self.calculate_years_since(time)
                    if year_since < 4.0 + year and tag not in recent_tags:
                        if flag == True:
                            if tag.startswith("upstream"):
                                recent_tags.append(tag)
                        else:
                            recent_tags.append(tag)
                year += 1.0
        # Cap the result at 100 tags.
        if len(recent_tags) > 100:
            return recent_tags[0:100]
        else:
            return recent_tags

    def generate_signatures(self,repo_path, _idx, oss_url,already_sig_info,already_tag,IsUpdate):
        """
        Core signature-generation routine: find tags not collected before,
        check each one out, and extract its function signatures.
        @param repo_path: local repository path
        @param _idx: worker process index (only used in log messages)
        @param oss_url: remote url corresponding to this local repository
        @param already_sig_info: previously stored signatures (hash -> set of tags)
        @param already_tag: taginfo document whose 'tag_time' is a list of
               (tag_name, time_string) pairs
        @param IsUpdate: set to True when at least one new version is collected
        @return: func_dict, func_src_dict, ver_cnt, tag_date, IsUpdate
                 (implicitly None on unexpected errors)
        """
        # Run git to list tags/dates, then check out and process each new tag.
        try:
            func_dict = already_sig_info
            func_src_dict = defaultdict(set)
            ver_cnt = len(already_tag['tag_time'])
            tag_date =already_tag['tag_time']
            already_tag_name=[name[0] for name in already_tag['tag_time']]
            print(f'查看已有tag名：{already_tag_name}')
            tag_command = "git tag"
            tag_result = subprocess.check_output(
                # tag_command, stderr=subprocess.STDOUT, cwd=repo_path,shell=True
                tag_command, stderr=subprocess.DEVNULL, cwd=repo_path, shell=True
            ).decode()

            # "%ai %d" -> author date followed by the ref decoration,
            # e.g. "2023-01-02 03:04:05 +0000  (tag: v1.2, ...)".
            data_command = 'git log --tags --simplify-by-decoration --pretty="format:%ai %d"'
            data_result = subprocess.check_output(
                # data_command, stderr=subprocess.STDOUT,cwd=repo_path, shell=True
                data_command, stderr=subprocess.DEVNULL, cwd=repo_path, shell=True
            ).decode()

            # Map tag name -> "YYYY-MM-DD HH:MM:SS" (timezone offset dropped).
            tag_time = {}
            for tag_info in data_result.split('\n'):
                m = re.match(r".*tag: (?P<tag_name>.*)[),]", tag_info)
                if m:
                    tag_time[
                        m.groupdict()['tag_name'].split(',')[0]
                    ] = " ".join(tag_info.split()[:2])
            # If the repo has tags, process each selected tag; otherwise fall
            # back to the latest commit on the default branch.
            if tag_result == "":
                # No tags, only master repo.
                # For such projects both the version and the date are taken
                # from the most recent commit.
                master_command = 'git log --oneline --simplify-by-decoration --max-count=1 --pretty="format:%h %ai %d"'
                master_result = subprocess.check_output(
                    # master_command, stderr=subprocess.STDOUT, cwd=repo_path,shell=True
                    master_command, stderr=subprocess.DEVNULL, cwd=repo_path, shell=True
                ).decode()
                m = re.match(r"(?P<commit_id>\w+)\s+(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [-+]\d{4}).*",
                             master_result)
                if m:
                    commit_id = m.groupdict()['commit_id']
                    date_time = m.groupdict()['date']
                    # Only collect if this pseudo-tag was not collected before.
                    if "master_" + commit_id not in already_tag_name:
                        func_dict, fileCnt, funcCnt, func_src_dict, lineCnt, resDict = self.parse_files_with_tag(repo_path,
                                                                                                                 func_dict,
                                                                                                                 func_src_dict,
                                                                                                                 "master_" + commit_id,
                                                                                                                 date_time)
                        if funcCnt != 0:
                            IsUpdate = True  # something new was collected
                            ver_cnt += 1
                            tag_date .append(("master_" + commit_id, date_time))  # append the new version to the stored list
                            self.store_to_RepoFunc(resDict, os.path.basename(repo_path) + "@@" + "master_" + commit_id,
                                                   fileCnt, funcCnt)
            else:
                recent_tags = self.get_recent_tags(tag_time, oss_url)
                for tag in recent_tags:  # check out each selected tag and process that version
                    # Generate function hashes for each tag (version)
                    if tag == '' or tag in already_tag_name:
                        print(f'{repo_path}:不需要更新：{tag}')
                        continue
                    checkoutCommand = "git checkout -f " + tag
                    try:
                        print(f'更新：{repo_path},{tag}')
                        subprocess.check_output(checkoutCommand, stderr=subprocess.STDOUT, cwd=repo_path, shell=True)
                        func_dict, fileCnt, funcCnt, func_src_dict, lineCnt, resDict = self.parse_files_with_tag(
                            repo_path, func_dict, func_src_dict, tag,
                            tag_time[tag])
                        if funcCnt != 0:
                            IsUpdate = True  # something new was collected
                            ver_cnt += 1
                            tag_date.append((tag, tag_time[tag]))
                            self.store_to_RepoFunc(resDict, os.path.basename(repo_path) + "@@" + tag, fileCnt, funcCnt)
                    except Exception as e:
                        # Checkout/extraction failed for this tag: log and move on.
                        traceback.print_exc()
                        continue

            return func_dict, func_src_dict, ver_cnt, tag_date,IsUpdate
        except Exception as e:
            print(f"thread{_idx} 给仓库{repo_path}生成签名：\ngenerate_signatures error :{repo_path}")
            traceback.print_exc()

    def update_repo_signature(self, _idx, list_arr):
        """
        Worker-process entry point.
        Generates/refreshes the signature for each assigned local repository
        and writes the results back to the database.
        @param _idx: worker process index
        @param list_arr: list of initialsig documents assigned to this worker
        @return: None (updates the database as a side effect)
        """
        for oss in tqdm(list_arr, total=len(list_arr), desc=f"Thread{_idx} repo签名生成进度",unit="个repo"):  # progress bar over this worker's repos
            try:
                print(f"处理：{oss['sig_full_name']}")
                oss_url = self.clean_url(oss['oss_url'])
                sig_full_name = oss['sig_full_name']
                repo_path =os.path.join(self.oss_local_src_path,oss['src_local_path'])
                # NOTE(review): 'sig_info' is used below as a hash -> tags
                # mapping; presumably get_one_sig already decompressed it —
                # verify against that helper.
                already_sig_info=oss['sig_info']
                already_tag = self.tagInfo.get_data(sig_full_name)
                # Only proceed when an old signature exists, the clone is
                # present locally, and exactly one taginfo document matches.
                if len(already_sig_info)!=0 and os.path.exists(repo_path) and len(already_tag)==1:
                    IsUpdate=False
                    func_dict, func_src_dict, ver_cnt, tag_date ,IsUpdate= self.generate_signatures(repo_path, _idx, oss_url,already_sig_info,already_tag[0],IsUpdate)
                    print(f'之前签名:{len(already_sig_info)}')
                    print(f'查看funcidct长度:{len(func_dict)}')
                    print(f'update:{IsUpdate}')
                    if IsUpdate:
                        if len(func_dict) != 0:
                            # If the old sig_info lives in GridFS (stored as an
                            # ObjectId), pass its file id along so the stale
                            # GridFS file can be deleted before re-storing.
                            res=self.initialSig.get_data({'sig_full_name':sig_full_name},{'sig_info':1})[0]
                            if isinstance(res['sig_info'], bson.objectid.ObjectId):
                                self.update_to_initialSig(oss['_id'],sig_full_name, func_dict, ver_cnt,res['sig_info'])
                            else:
                                self.update_to_initialSig(oss['_id'], sig_full_name, func_dict, ver_cnt,None)
                        if len(func_src_dict) != 0:
                            # self.store_to_Func_Src(oss,func_src_dict)
                            pass
                        if len(tag_date) != 0:
                            self.update_to_tagInfo(already_tag[0]['_id'],tag_date)
            except Exception as e:
                print("clone_extract_signatures error:", e)
                traceback.print_exc()
                continue

    def split_list(self, list, n):
        """
        Split the task list into up to *n* roughly equal chunks, one per
        worker process.

        NOTE(review): the parameter name shadows the builtin ``list``.
        :param list: the full task list
        :param n: number of chunks, i.e. number of worker processes
        :return: yields consecutive slices of the original list
        """
        step_count = math.ceil(len(list) / n)
        for i in range(0, len(list), step_count):
            end = i + step_count
            if end > len(list):
                end = len(list)
            yield list[i:end]

    def main(self, processList=None, num_processes=8):
        """Fetch the documents to process and fan them out to worker processes."""
        if processList is None:
            processList = self.initialSig.get_one_sig({},{})# pull every document from the initial signature collection
        if len(processList) == 0:
            print("没有需要处理的仓库")
        else:
            print(f"共计要处理:{len(processList)}")
            processes = []
            for idx, list_arr in enumerate(self.split_list(processList, num_processes)):  # split the task list and number each worker
                process = multiprocessing.Process(target=self.update_repo_signature, args=(idx + 1, list_arr))
                processes.append(process)
            for process in processes:
                process.start()
            for process in processes:
                process.join()  # wait for every worker before the main process continues
@click.command()
@click.option('--cpu', type=int, default=8, help='并发执行数')
def main(cpu):
    """Command-line entry point: run the OSS signature update with *cpu* worker processes."""
    UpdateOSS().main(None, cpu)
# Run the CLI only when this file is executed directly (not on import).
if __name__ == "__main__":
   main()