"""
  Copyright (c) 2024 Northeastern University (China)
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
"""
# Task: for every function in the collected initial signatures (initialSig), determine which OSS projects it appears in.
# Input: all OSS entries in the initial signature library.
# Output: per-function OSS occurrence info, written line by line to a JSON file.


import copy
import re
import sys
from concurrent.futures import ThreadPoolExecutor, wait
import os
import json
import threading
import traceback
import concurrent.futures

import click
import schedule
import time
from pathlib import Path

from jsonlines import jsonlines
from tqdm import tqdm
from collections import defaultdict

from src.common_utils import configure
from src.common_utils.logger import logger

from src.operate_db.InitialSig import InitialSig as InitialSig
from src.operate_db.RepoFunction_db import RepoFunction
from src.operate_db.TagInfo_db import TagInfo as TagInfo

# Absolute path of the directory containing this file
g_cur_dir = os.path.dirname(os.path.abspath(__file__))
# Parent directory of this file's directory
father_path = os.path.dirname(g_cur_dir)
# Make sibling packages importable when this file is run as a script
sys.path.append(father_path)

class FuncMapper:
    """Build, for every collected function hash, the OSS projects it appears
    in together with the earliest (time, tag) at which it was seen there.

    Partial results are checkpointed periodically to
    ``func_info_all_midres.json`` (first line: cursor; remaining lines: one
    entry each) so an interrupted run can resume, and the final result is
    written line by line to ``func_info_all.json``.
    """

    # Project-local DB accessors; created once and shared across instances.
    tagInfo = TagInfo()
    initialSig = InitialSig()
    repoFunction = RepoFunction()
    # Checkpoint file for intermediate results (resume support).
    func_info_all_midres_path = Path(configure.CONFIG['component_construct'], "func_info_all_midres.json")
    # Final output file: one {func_hash: owner_info} JSON object per line.
    func_info_all_noderes_path = Path(configure.CONFIG['component_construct'], "func_info_all.json")
    # func_hash -> {sig_full_name: (earliest_time, func_path_list)}
    func_info_all = defaultdict(dict)
    # Number of OSS entries already processed (resume offset).
    cursor = 0

    def __init__(self):
        self.stop_storing = False       # signals the checkpoint loop to exit
        self.store_results_job = None   # handle of the scheduled checkpoint job
        # Guards cursor/func_info_all snapshots against concurrent workers.
        self._cursor_lock = threading.Lock()

    def store_results_to_file(self):
        """Checkpoint in-memory results: cursor first, then one entry per line."""
        # Snapshot under the lock so workers cannot mutate state mid-dump.
        with self._cursor_lock:
            cursor = self.cursor
            data = self.func_info_all.copy()
        with jsonlines.open(self.func_info_all_midres_path, 'w') as writer:
            # First line records how many OSS entries are already finished.
            # (The original guarded this with a dead, always-zero counter.)
            writer.write(str(cursor))
            for key, value in data.items():
                writer.write({key: value})
        print("中间文件记录完成")

    def store_results_periodically(self):
        """Background loop: checkpoint once per minute until stop_storing is set."""
        # BUGFIX: keep the job handle so stop_storing_results() can cancel it;
        # the original discarded the return value, making cancel_job a no-op.
        self.store_results_job = schedule.every(1).minutes.do(self.store_results_to_file)
        while not self.stop_storing:
            schedule.run_pending()
            time.sleep(1)

    def stop_storing_results(self):
        """Cancel the periodic checkpoint job and clear all pending jobs."""
        if self.store_results_job is not None:
            schedule.cancel_job(self.store_results_job)
            self.store_results_job = None
        # Clear any remaining scheduled jobs.
        schedule.clear()

    def load_mid_result(self):
        """Restore cursor and partial results from the checkpoint file, if any."""
        if self.func_info_all_midres_path.exists():
            with open(self.func_info_all_midres_path, 'r', encoding='utf-8') as file:
                for line_number, line in enumerate(file):
                    if line_number == 0:
                        # First line is the cursor, stored as a JSON string.
                        self.cursor = int(json.loads(line))
                    else:
                        self.func_info_all.update(json.loads(line))

    def load_from_db_continue(self, num_process, sig_num):
        """
        Load already-persisted function info from the DB, then continue
        generating from OSS index ``sig_num``.

        Use this when the info DB is known to hold the results of exactly the
        first ``sig_num`` OSS projects.
        @param num_process: number of concurrent worker threads
        @param sig_num: number of OSS entries to skip (already done)
        """
        # NOTE(review): ``self.funcMap`` is never defined on this class, so this
        # method raises AttributeError as written. A funcMap DB accessor
        # (analogous to tagInfo/initialSig) must be provided for it to work.
        count = self.funcMap.get_data_count()
        if count != 0:
            # Data already persisted: read it back instead of regenerating.
            logger.info("[+] load the info of functions")
            result = self.funcMap.get_data({}, {'func_hash': 1, 'func_owner': 1})
            for res in result:
                self.func_info_all[res['func_hash']] = res['func_owner']
        self.generate_func_map(num_process, sig_num)

    def load_from_midres_continue(self, num_process):
        """
        Resume generation from the mid-result checkpoint file.

        @param num_process: number of concurrent worker threads
        @return: None (output files are written as a side effect)
        """
        logger.info("[+] construct the info of functions")
        self.load_mid_result()  # restore cursor + partial results first
        self.generate_func_map(num_process, self.cursor)

    def find_tag_info(self, sig_full_name):
        """Return the [(tag, time), ...] list for an OSS, or [] if unknown."""
        res = self.tagInfo.get_data(sig_full_name)
        return res[0]['tag_time'] if res else []

    def find_func_path(self, sig_full_name):
        """Map tag name -> {func_hash: path info} for every tag of an OSS."""
        prefix = sig_full_name + '@@'
        tag_path = defaultdict(dict)
        tag_func = self.repoFunction.get_one_tagfunc(
            {'sig_full_tag_name': {"$regex": f"^{re.escape(prefix)}"}},
            {'tag_func': 1, 'sig_full_tag_name': 1})
        for tag in tag_func:
            tagname = tag['sig_full_tag_name'].replace(prefix, "")
            tag_path[tagname] = tag['tag_func']
        return tag_path

    def generate_func_map_detail(self, sig, pbar):
        """
        Process one OSS signature: for every function hash, record this OSS
        plus the earliest (time, tag) at which the function was seen in it.

        @param sig: dict with 'sig_full_name' and 'sig_info' ({func_hash: [tags]})
        @param pbar: shared tqdm progress bar
        @return: 0 on success; None on failure (exception printed)
        """
        try:
            tag_time = self.find_tag_info(sig['sig_full_name'])
            # BUGFIX: the original compared the list against 0 (always unequal
            # for a list); test truthiness instead.
            tag_time_dict = {date[0]: date[1] for date in tag_time} if tag_time else {}
            func_path = self.find_func_path(sig['sig_full_name'])
            # Hoisted out of the loop: pattern for "YYYY-MM-DD HH:MM:SS".
            time_pattern = re.compile(r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})")
            for funchash, functag in sig['sig_info'].items():
                func_time = []  # every (time, tag) where this func appears in this OSS
                for tag in functag:
                    commit_time = tag_time_dict[tag]
                    if len(commit_time) != 19:
                        # Timestamp carries extra text; extract the bare
                        # "YYYY-MM-DD HH:MM:SS" portion when present.
                        match = time_pattern.search(commit_time)
                        if match:
                            commit_time = match.group(1)
                    func_time.append((commit_time, tag))
                if not func_time:
                    # BUGFIX: guard — no tags means nothing to record (the
                    # original would IndexError on func_time[0]).
                    continue
                func_time.sort(key=lambda x: x[0])  # ascending by time
                # Keep only the earliest occurrence of this function in this OSS.
                self.func_info_all[funchash][sig['sig_full_name']] = (
                    func_time[0][0],
                    list(func_path[func_time[0][1]][funchash]),
                )
            # BUGFIX: increment under the lock — workers run concurrently and
            # an unguarded += is a read-modify-write race.
            with self._cursor_lock:
                self.cursor += 1
                done = self.cursor
            print(f"已经完成前{done}个")
            pbar.update()
            return 0
        except Exception as e:
            traceback.print_exc()
            print(str(e))

    def generate_func_map(self, num_threads, sig_num, processList=None):
        """
        Core driver: fan work out to a thread pool while a background thread
        checkpoints intermediate results once a minute.

        @param num_threads: number of concurrent worker threads
        @param sig_num: number of OSS entries to skip (resume offset)
        @param processList: explicit OSS iterable to process; fetched from the
                            initial-signature DB when None
        @return: None (writes func_info_all.json as a side effect)
        """
        if processList is None:
            processList = self.initialSig.get_one_sig({}, {"sig_full_name": 1, "sig_info": 1, "_id": 0}, sig_num)
        count = self.initialSig.get_data_count()

        # Background thread running the periodic checkpoint loop.
        store_results_thread = threading.Thread(target=self.store_results_periodically)
        store_results_thread.start()

        futures = []
        with tqdm(total=count - self.cursor, desc="oss处理进度") as pbar:
            with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
                for sig in processList:
                    try:
                        futures.append(executor.submit(self.generate_func_map_detail, sig, pbar))
                    except Exception:
                        logger.error('[+] An error occurred', exc_info=True)
                        traceback.print_exc()
        # The executor context manager already joined all workers; wait() is
        # kept as a cheap explicit barrier.
        wait(futures)
        print("开始记录数据")
        self.stop_storing = True
        # Cancel the checkpoint job and let its thread exit.
        self.stop_storing_results()
        store_results_thread.join()
        with jsonlines.open(self.func_info_all_noderes_path, 'w') as writer:
            for key, value in self.func_info_all.items():
                writer.write({key: value})
        print("funcinfo数据记录完成")

@click.command()
@click.option('--cpu', type=int, default=8, help='并发执行数')
def main(cpu):
    """Entry point: resume function-map generation from the checkpoint file."""
    FuncMapper().load_from_midres_continue(cpu)


if __name__ == "__main__":
    main()