"""
  Copyright (c) 2024 Northeastern University (China)
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
"""

# Task: since the same func can appear in multiple OSS projects, this module decides which OSS is the original one.
# Method:
#         a. Only functions that appear in more than one OSS need to be judged.
#         b. Judge by the elements of the function's file path: a path segment that occurs frequently AND is also the name of an OSS makes that OSS very likely the original library; if several such OSS exist, break the tie by the function's birth time.
#         c. OSS must be split into pre-2019 and post-2019 groups: within the same period the earlier one is considered original, but when one candidate is pre-2019 and another is post-2019, the post-2019 one is preferred. This is requirement-driven, because target software normally uses TPLs released within the last 4 years.
#         d. If a function clearly lives under an external directory such as libs, external, or 3rd_party and no owning OSS is found in the database, it very likely comes from a TPL not included in the signature database; such functions are removed later to avoid false positives caused by overlap between these functions and the target code.
import copy
import json
import os
import sys
import threading
import time

from collections import defaultdict
from datetime import datetime

from Levenshtein import distance

import schedule
from jsonlines import jsonlines
from tqdm import tqdm

from src.common_utils import configure
from src.operate_db.InitialSig import InitialSig as InitialSig
from src.ossdb_construction import tpllite_config

# directory containing this file
g_cur_dir = os.path.dirname(os.path.abspath(__file__))
# parent directory of this file's directory
father_path = os.path.dirname(g_cur_dir)
# make the parent package importable when this file is run as a script
sys.path.append(father_path)
from src.common_utils.logger import logger
from pathlib import Path, PurePath
class ObtainFuncOrigin :
    """Decide, for every function that occurs in more than one OSS project,
    which project is its most likely origin.

    Strategy (mirrors the module header comment):
      a. only functions occurring in more than one OSS need judging;
      b. path segments of the function's file path that occur frequently and
         resemble an OSS name point to the origin library; ties are broken
         by the function's birth (commit) time;
      c. candidates are split into pre-2019 / post-2019 buckets; within one
         bucket the earliest wins, and when both buckets are non-empty the
         post-2019 bucket is preferred (target software normally uses TPLs
         from the last 4 years);
      d. functions found only under external-looking directories (libs,
         external, 3rd_party, ...) with no owning OSS in the database are
         tagged as coming from an unindexed external TPL and filtered later.
    """
    # NOTE(review): these are CLASS-level attributes, so func_origin and
    # cursor are shared by all instances — acceptable for the singleton
    # usage in __main__, but confirm no second instance is ever created.
    oss_sigdb = InitialSig()
    # func_origin_path = Path(os.path.join(g_cur_dir,"data_out","func_origin_tplite.json"))
    # func_origin_midres_path=Path(g_cur_dir ,"data_out","func_origin.json")  # intermediate data
    # func_origin_all_res_path = Path(g_cur_dir, "data_out", "func_origin_all_res.json")  # final result
    # func_info_all_noderes_path = Path(g_cur_dir, "data_out", "func_info_all.json")  # info for every function
    func_origin_midres_path = Path(configure.CONFIG['component_construct'], "func_origin.json")  # intermediate checkpoint file
    func_origin_all_res_path = Path(configure.CONFIG['component_construct'], "func_origin_all_res.json")  # final result file
    func_info_all_noderes_path = Path(configure.CONFIG['component_construct'], "func_info_all.json")  # file holding info for every function
    func_origin=defaultdict(dict)  # func_id -> list of (tpl_id_or_label, flag/time) origin candidates
    cursor=0  # index of the func processed so far (checkpoint / resume point)

    def __init__(self):
        self.stop_storing = False  # signals the periodic-checkpoint thread to exit
        self.store_results_job = None  # handle of the periodic checkpoint job
        # NOTE(review): store_results_job is never assigned anywhere else, so
        # stop_storing_results() effectively relies on schedule.clear() alone
        # — confirm that is intended.

    def store_results_to_file(self):
        """Write a checkpoint to the intermediate JSON-lines file.

        Format: first line is the cursor (serialized as a string), then one
        {func_id: origin_candidates} object per line.
        """
        cursor = copy.copy(self.cursor)
        data = self.func_origin.copy()  # shallow snapshot so the worker can keep mutating func_origin
        line = 0
        with jsonlines.open(self.func_origin_midres_path, 'w') as writer:
            if line == 0:  # NOTE(review): line is always 0 here — this guard is a no-op
                writer.write(str(cursor))
            for key, value in data.items():
                writer.write({key: value})
        print("中间文件记录完成")

    def store_results_periodically(self):
        """Thread target: flush a checkpoint every 30 minutes until
        self.stop_storing becomes True."""
        # schedule.every(2).hours.do(self.store_results_to_file)
        schedule.every(30).minutes.do(self.store_results_to_file)
        while not self.stop_storing:
            schedule.run_pending()
            time.sleep(1)

    def stop_storing_results(self):
        """Cancel the periodic checkpoint job (if one was recorded) and
        clear every pending scheduled job."""
        if self.store_results_job is not None:
            schedule.cancel_job(self.store_results_job)
        # remove all pending scheduled jobs
        schedule.clear()

    def load_mid_result(self):
        """Load the last checkpoint (cursor + partial func_origin) from the
        intermediate file, if it exists, so processing can resume."""
        if self.func_origin_midres_path.exists():
            # open the JSON-lines checkpoint file
            with open(self.func_origin_midres_path, 'r') as file:
                # first line is the cursor; every following line is one
                # {func_id: value} object
                line_number = 0
                for line in file:
                    if line_number == 0:
                        self.cursor = int(json.loads(line))
                        line_number += 1
                    else:
                        self.func_origin.update(json.loads(line))
    def obtain_func_origin(self):
        """
        Resume from the previously generated checkpoint and continue
        determining the origin library for every function (main entry point).
        @return: the func_origin mapping (also persisted to file)
        """
        start = time.time()
        #func_info_count = self.funcInfoData.get_data_count()  # number of func_info records
        # if func_info_count == 0:  # if func_info is empty, generate it
        if not os.path.exists(self.func_info_all_noderes_path):
            print("还没有func_info信息")
        else:
            # continue from the result of the previous run
            logger.info("[+] construct the func_orign info")
            self.load_mid_result()  # load the previous checkpoint first
            self.generate_func_origin_enhance(self.cursor)
            end=time.time()
            print(f"加载func_origin用时:{end-start}")
            print(f"func_origin的内存总占用：{sys.getsizeof(self.func_origin)}")
        return self.func_origin

    def segment_by_decade(self, tpl_candidate):
        """Split (tpl_id, commit_time) candidates into pre-2019 and
        post-2019 buckets and return the joint-earliest candidates of the
        preferred bucket (post-2019 wins when both buckets are non-empty).
        Returns [] when tpl_candidate is empty.
        Assumes the input is already sorted ascending by timestamp."""

        before_2019 = []
        after_2019 = []
        equal_values = []
        for candidate in tpl_candidate:
            # NOTE: local name `time` shadows the imported time module inside this loop
            time = datetime.strptime(candidate[1], "%Y-%m-%d %H:%M:%S")
            if time.year < 2019:
                before_2019.append(candidate)
            else:
                after_2019.append(candidate)
        print(f'2019 h:{after_2019}')
        print(f'2019 b{before_2019}')
        # if all dates fall on one side of 2019, take the earliest of that side
        if before_2019 and not after_2019:
            equal_values=self.equal(before_2019)
        elif not before_2019 and after_2019:
            equal_values = self.equal(after_2019)
        elif before_2019 and after_2019:
            # dates on both sides of 2019: prefer the earliest post-2019 one
            equal_values = self.equal(after_2019)
        return equal_values
    def equal(self,tpl_candidate):
        """Return every candidate whose timestamp equals the first
        candidate's timestamp — i.e. the joint-earliest entries, given the
        input is sorted ascending by timestamp."""
        # collect elements tied with the first one
        equal_values = []
        eqal=tpl_candidate[0][1]
        # walk the sorted list and pick entries with the same timestamp
        for i in range(0,len(tpl_candidate)):
            if eqal==tpl_candidate[i][1]:
                equal_values.append(tpl_candidate[i])

        return equal_values
    def generate_func_origin_enhance(self,func_num):
        """
        Core routine that decides the origin library of each function.
        @param func_num: number of funcs already processed, i.e. resume offset
                         (only used for the progress-bar total here)
        @return: None — results accumulate in self.func_origin and are
                 written to func_origin_all_res_path
        """
        func_info_all={}
        with open(self.func_info_all_noderes_path, 'r', encoding='utf-8') as file:
            # read the JSON data line by line
            for line in file:
                    func_info_all.update(json.loads(line))
        logger.info("[+] generate_func_origin_enhance")
        # parse the origin tpl
        #func_info_all = self.funcInfoData.get_many_data({}, {})  # fetch the func_info records
        #func_info_count = self.funcInfoData.get_data_count()
        func_info_count=len(func_info_all)
        tplnames = self.oss_sigdb.get_many_data({}, {'sig_full_name': 1, "_id": 0, "repo_name": 1})
        tpl_names = {tplname['sig_full_name']: tplname['repo_name'] for tplname in tplnames}

        # background thread that checkpoints intermediate results periodically
        store_results_thread = threading.Thread(target=self.store_results_periodically)
        store_results_thread.start()

        for func_id,func_info in tqdm(func_info_all.items(), total=(func_info_count-func_num), desc="func处理进度", unit="个func"):
            if len(func_info) <= 1:  # function occurs in only one OSS: never reused
                continue
            tpl_time = list()
            seg_count = defaultdict(int)
            tpl_name_id = defaultdict(list)
            for tpl_id, info in func_info.items():
                extern_flag = False
                seg_set = set()
                tpl_name = tpl_names[tpl_id]
                if len(info[1])==0:  # the path list may be empty — skip such records
                    print(f"{func_id}:info:{info}")
                    continue
                func_path = PurePath(info[1][0].lower())
                print(f"func_path:{func_path}")
                commit_time = info[0]
                for seg in func_path.parent.parts + (
                func_path.stem,):  # turn the path into a tuple like ('examples', 'clockerplugin', 'clockerlistener') and inspect each directory segment
                    if seg in tpllite_config.EXTERN_FLAG:  # segment suggests the file was vendored from an external source
                        extern_flag = True
                    if seg in tpllite_config.BLACK_SET or seg == tpl_name:  # skip generic/common directory names
                        continue
                    seg_set.add(seg)
                for seg in seg_set:  # count how often each path segment occurs across TPLs
                    seg_count[seg] += 1
                if not extern_flag:
                    tpl_info = (tpl_id, commit_time)
                    tpl_name_id[tpl_name].append(tpl_info)
                    tpl_time.append(tpl_info)
            # check the function path
            if len(seg_count) and len(tpl_name_id):
                tpl_candidate = list()
                lower_count = 1 if len(func_info) <= 3 else 2
                seg_sort = sorted(seg_count.items(),
                                  reverse=True, key=lambda x: x[1])
                max_similarity = 0.61  # minimum similarity threshold for a segment/TPL-name match
                max_sim_tpl_name = []
                for seg, count in seg_sort:
                    if count < lower_count:  # segment occurs too rarely to be meaningful
                        break
                    if seg in tpllite_config.SPECIAL_CASE:
                        self.func_origin[func_id]=[ (tpllite_config.SPECIAL_CASE[seg], 0)]
                        break
                    for tplname in tpl_name_id:
                        # normalized Levenshtein similarity between TPL name and path segment
                        current_similarity = 1 - distance(tplname, seg.lower()) / max(len(tplname), len(seg))
                        print(f'{seg}:{tplname}:{current_similarity}')

                        if current_similarity > max_similarity:
                            # strictly better similarity: restart the candidate list
                            max_sim_tpl_name=[]
                            max_similarity = current_similarity
                            max_sim_tpl_name.append(tplname)
                        elif current_similarity == max_similarity and max_similarity!=0:
                            # tied similarity: extend the candidate list with this TPL
                            if tplname not in max_sim_tpl_name:
                                max_sim_tpl_name.append(tplname)
                    print(f'max_sim_tplname:{len(max_sim_tpl_name)}:{max_sim_tpl_name}')
                for tplname in max_sim_tpl_name:
                    tpl_candidate.extend(tpl_name_id[tplname])  # this TPL matches the path well: record its entries
                if func_id not in self.func_origin and len(tpl_candidate):
                    tpl_candidate.sort(key=lambda x: x[1])  # sort ascending by time, earliest first
                    print(f'tplcandicate:{tpl_candidate}')
                    equal=self.segment_by_decade(tpl_candidate)
                    if len(equal) !=1:
                        print(f"seg不唯一{func_id}")
                    data=[]
                    for item in equal:
                        data.append((item[0], 0))   # keep the TPL(s) where the function existed earliest
                    self.func_origin[func_id] =data

            if func_id not in self.func_origin and len(tpl_time):  # path-based matching missed this func: fall back to birth time alone
                # check function birth time
                tpl_time.sort(key=lambda x: x[1])  # ascending by time (comment originally said "descending" but no reverse flag is used)
                print(f'tpltime:{tpl_time}')
                equal = self.segment_by_decade(tpl_time)
                if len(equal) != 1:
                    print(f"t不唯一{func_id}")
                data=[]
                for item in equal:
                    data .append(item)
                self.func_origin[func_id]=data
            if func_id not in self.func_origin and len(tpl_time)==0:
                self.func_origin[func_id]=[('external lib',1)]  # every occurrence looked external: attribute to an unindexed external library

            self.cursor+=1

        self.stop_storing = True
        # stop the periodic checkpoint job
        self.stop_storing_results()
        # wait for the checkpoint thread to finish
        store_results_thread.join()
        # build the list of database update operations
        # bulk_operations = []
        # for hash,orign_info in self.func_origin.items():
        #     update_operation = UpdateOne(
        #         {'func_hash': hash},
        #         {'$set': {'func_origin': orign_info}}
        #     )
        #     bulk_operations.append(update_operation)
        print("开始写入数据")
        with jsonlines.open(self.func_origin_all_res_path, 'w') as writer:
            for key, value in self.func_origin.items():
                writer.write({key: value})
        print("写入数据完成")
        #self.funcInfoData.update_many_data(bulk_operations)  # persist into the database



if __name__ == '__main__':
    # Script entry point: build the function-origin map and persist it.
    origin_builder = ObtainFuncOrigin()
    origin_builder.obtain_func_origin()

