"""
  Copyright (c) 2024 Northeastern University (China)
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
"""
import io
import math
import multiprocessing
import os
import sys
import time
import traceback
from collections import defaultdict

from tqdm import tqdm

from src.common_utils.utils_file import compress_dict
from src.operate_db.InitialSig import InitialSig as InitialSig
from src.operate_db.VerWeight import VerWeight
from src.common_utils.logger import logger
# Absolute path of the directory containing this file
g_cur_dir = os.path.dirname(os.path.abspath(__file__))
# Parent directory of this file's directory
father_path = os.path.dirname(g_cur_dir)
sys.path.append(father_path)

class GenerateVerweight:
    """Compute and persist per-hash version weights for all OSS initial signatures.

    For each signature, the weight of a hash is
    ``log(total_version_count / versions_containing_hash)`` — an IDF-style
    score, so hashes that appear in fewer versions weigh more.
    """

    # DB accessors. NOTE: class-level attributes shared by all instances;
    # each worker process gets its own copy when the process is spawned.
    oss_sigdb = InitialSig()
    verWeight = VerWeight()

    def get_weight(self, S):
        """Look up the stored weight record for signature full name *S*."""
        return self.verWeight.get_one_weight({'sig_full_name': S}, {})

    def split_list(self, list, n):
        """
        Split the task list into up to *n* roughly equal chunks, one per worker.

        :param list: complete task list (name kept for backward compatibility
            with keyword callers, although it shadows the builtin)
        :param n: number of chunks, i.e. number of worker processes
        :return: generator yielding consecutive slices of the input list
        """
        items = list  # local alias so the builtin shadowing stays contained
        if not items:
            # math.ceil(0 / n) == 0 would make range() raise "arg 3 must not
            # be zero"; an empty input simply yields no chunks.
            return
        step_count = math.ceil(len(items) / n)
        for start in range(0, len(items), step_count):
            end = min(start + step_count, len(items))
            yield items[start:end]

    def store_to_weight(self, S, weight, ver_cnt):
        """
        Persist the weight mapping for signature *S*.

        Small payloads are stored inline (compressed); payloads above the
        size threshold go to GridFS and only the file id is stored.

        :param S: signature full name
        :param weight: dict mapping hash value -> weight (empty dict is skipped)
        :param ver_cnt: number of versions of this OSS component
        """
        if not weight:
            return
        weight_zip = compress_dict(weight)
        threshold = 15800000  # 15.8 MB — stay safely under MongoDB's 16 MB document cap
        bson_size = len(weight_zip)
        if bson_size <= threshold:
            data = {
                'sig_full_name': S,
                'weight': weight_zip,
                'ver_cnt': ver_cnt
            }
        else:
            # Too large for an inline document: store the gzip blob via
            # GridFS and reference it by id instead.
            weight_id = self.verWeight.use_Gridfs_store(io.BytesIO(weight_zip), S + ".gz")
            data = {
                'sig_full_name': S,
                'weight': weight_id,
                'ver_cnt': ver_cnt
            }
        self.verWeight.add_one(data)

    def process_oss(self, idx, list_arr):
        """
        Worker body: compute and store version weights for each signature.

        @param idx: worker index (used only in the progress-bar label)
        @param list_arr: list of signature documents to process
        @return: None (results are written to the weight collection)
        """
        for sig in tqdm(list_arr, total=len(list_arr), desc=f"thread{idx}处理oss进度"):
            weightJson = defaultdict(float)
            S = sig['sig_full_name']
            ossinfo = self.oss_sigdb.get_one_sig({'sig_full_name': S}, {"sig_full_name": 1, "sig_info": 1, "oss_url": 1, "sig_info_cnt": 1, "ver_cnt": 1})[0]
            try:
                total_vers = float(ossinfo['ver_cnt'])
                for hashval, verlst in ossinfo['sig_info'].items():
                    # Compute the log-ratio once; reuse it for the debug
                    # print (same formatted value as before) and the store.
                    w = math.log(total_vers / float(len(verlst)))
                    print(f"{hashval}:{len(verlst)},{w}")
                    weightJson[hashval] = w
                self.store_to_weight(S, weightJson, ossinfo['ver_cnt'])
            except Exception:
                # Best-effort batch: log, skip this signature, keep going.
                logger.error('[+]  An error occurred', exc_info=True)
                traceback.print_exc()
                continue

    def process_all_initialsig(self, num_processes, skip_num, all_component_name=None):
        """
        Entry point: generate version weights for every OSS initial signature.

        @param num_processes: number of concurrent worker processes (int)
        @param skip_num: number of OSS entries to skip (int) — resume point
            for an interrupted or incremental run
        @param all_component_name: optional explicit list of signatures to
            process; fetched from the signature DB when None
        @return: None (updates the database)
        """
        logger.info("[+] generate verweight")
        processes = []
        if all_component_name is None:
            all_component_name = self.oss_sigdb.get_data({}, {"sig_full_name": 1}, skip_num)
        # Partition the task list and assign one chunk per worker process.
        for idx, list_arr in enumerate(self.split_list(list(all_component_name), num_processes)):
            process = multiprocessing.Process(target=self.process_oss, args=(idx + 1, list_arr))
            processes.append(process)
        for process in processes:
            process.start()
        # Block until every worker has finished before returning.
        for process in processes:
            process.join()



if __name__ == "__main__":
    start = time.time()
    generateVerweight= GenerateVerweight()
    generateVerweight.process_all_initialsig(skip_num=0, num_processes=8)
    end = time.time()
    print(f"共计用时：{end - start}")