"""
  Copyright (c) 2024 Northeastern University (China)
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
"""
# 任务：ccscaner扫描得到的依赖项文件scaner_dep.json中只记录了名字，需要从本地数据库OSSDB以及联网github中搜索得到对应的地址url,
# 之后添加到alloss.json文件中
# 【1】读取scanner_dep.jl文件，结果存到all_deps字典列表里,对ccscaner的结果做一次简单的过滤，就是把特殊符号的删去
# 【2】读取error_dep.jl文件，存到error_deps[]
# 【3】从本地数据库OSSDB中模糊搜索符合depname的数据。对搜索数据相似度排序，选取达到0.85的最相似结果返回
# 【4】如果【3】搜索为None,从github上搜索，使用githubapi,搜索非fork的项目，对搜索结果排序，选取达到0.85的最相似结果返回
# 【5】如果【4】搜索也是None,将这个depname记入error_dep.jl，如果后续OSSDB丰富，可以继续搜索，如果搜索不为空，搜索结果加入alloss.jl

import math
import os
import re
import sys
import threading
import traceback

from tqdm import tqdm
from fuzzywuzzy import fuzz
from src.common_utils.utils_file import read_jl, write_append_jl,get_website
from src.operate_db.test.oss_db import OSSDB
from src.common_utils import configure
from src.common_utils.logger import logger
# 当前文件的路径
# Directory containing this file.
g_cur_dir = os.path.dirname(os.path.abspath(__file__))
# Parent directory, added to sys.path so sibling packages are importable.
father_path = os.path.dirname(g_cur_dir)
sys.path.append(father_path)
# Minimum fuzzy-match score (0-100) a candidate name must reach to be accepted.
similar_threshold = configure.CONFIG['similar_threshold']
# Noise tokens: any scanned dependency name containing one of these is dropped.
# NOTE: this name shadows the builtin ``filter`` but is referenced elsewhere in
# this module, so it is kept for compatibility.  The duplicate "]" entry has
# been removed (membership tests make it redundant).
# NOTE(review): "#\\" is the two characters '#' and '\' fused together —
# presumably meant as two separate tokens; confirm before splitting.
filter = ["$", "{", "]", "/", "(", ")", "#\\", "[", "depname"]

class FindDepsAddress:
    """Resolve dependency names from the ccscaner scan to upstream URLs.

    Lookup order for each dependency name:
      1. fuzzy search in the local OSSDB database;
      2. if nothing is found there, query the GitHub / GitLab / Gitee APIs;
      3. names that resolve nowhere are appended to ``error_dep.jl`` so they
         can be retried once OSSDB grows richer.
    Every resolved ``{'name', 'url', 'source'}`` record is appended to
    ``process_oss.jl``.
    """

    # Shared database handle, created once at class-definition time.
    ossdb = OSSDB()
    # ccscaner scan results (input).
    deps_path = os.path.join(configure.CONFIG['ossdb_construct'], 'scaner_dep.jl')
    # Names and addresses of every OSS that must be downloaded (output).
    alloss_path = os.path.join(configure.CONFIG['ossdb_construct'], 'process_oss.jl')
    # Dependencies that could not be resolved (output).
    error_dep_path = os.path.join(configure.CONFIG['ossdb_construct'], 'error_dep.jl')

    def __init__(self):
        """Load the scan results and keep only 'clean' dependency names.

        The per-run lists are instance attributes (they used to be mutable
        class attributes, which silently shared state across instances).
        """
        self.error_deps = []  # deps already known to be unresolvable
        self.all_deps = []    # cleaned scan results to process
        self.exist_oss = []   # urls already present in alloss_path
        for dep in read_jl(self.deps_path):
            name = dep['name']
            # Keep only names longer than one character that contain none of
            # the noise tokens in the module-level ``filter`` list.
            if len(name) > 1 and not any(token in name for token in filter):
                self.all_deps.append(dep)

    def similar(self, candicate, target):
        """Return the candidates whose 'oss_name' best matches *target*.

        Only candidates scoring at least ``similar_threshold`` are kept; of
        those, every candidate tied for the highest score is returned.

        :param candicate: iterable of dicts, each with an 'oss_name' key
        :param target: dependency name to match against
        :return: list of best-matching candidate dicts (possibly empty)
        """
        scores = [(data, fuzz.ratio(data['oss_name'], target))
                  for data in candicate]
        high_similarity_projects = [(project, score)
                                    for project, score in scores
                                    if score >= similar_threshold]
        if not high_similarity_projects:
            return []
        high_similarity_projects.sort(key=lambda x: x[1], reverse=True)
        max_similarity = high_similarity_projects[0][1]
        # Keep every candidate tied for the top score.
        highest_similarity_projects = [pair for pair in high_similarity_projects
                                       if pair[1] == max_similarity]
        print(f"最高分：{highest_similarity_projects}")
        return [pair[0] for pair in highest_similarity_projects]

    def search_from_ossdb(self, target):
        """Fuzzy-search OSSDB for *target*; may return several hits or none.

        Each returned record also has its 'search_count' field incremented in
        the database so frequently requested packages can be identified.

        :param target: dependency name to look up
        :return: list of {'name', 'url', 'source'} dicts (possibly empty)
        """
        ossaddress = []
        # Case-insensitive substring match on either the OSS name or its
        # binary-package list.  (Local renamed from ``filter`` so it no longer
        # shadows the builtin / the module-level token list.)
        query = {"$or": [{"oss_name": {"$regex": "(?i)" + re.escape(target)}},
                         {"bin_pkgs": {"$regex": "(?i)" + re.escape(target)}}]}
        results = self.ossdb.get_data(query, {})
        unique_results = []
        if len(results) == 1:
            # Single hit: use it directly.
            unique_results = results
            print(results[0])
        else:
            # Multiple hits: deduplicate by upstream URL.
            seen_upstream_urls = set()
            for result in results:
                upstream_url = result["upstream_url"]
                if upstream_url not in seen_upstream_urls:
                    unique_results.append(result)
                    seen_upstream_urls.add(upstream_url)
        for item in self.similar(unique_results, target):
            ossaddress.append({
                'name': item['oss_name'],
                # NOTE: the url field in the database is not fully cleaned yet
                # (may still contain \s\r\n).
                'url': item['upstream_url'],
                'source': item['oss_source']
            })
            # Record how often this package has been looked up.
            search_count = item.get("search_count", 0) + 1
            self.ossdb.update_one(item['_id'], {"search_count": search_count})
        return ossaddress

    def search_from_github(self, target):
        """Search GitHub for C/C++ repositories matching *target*.

        Pages through the search API following the 'Link: rel="next"' header,
        then keeps the repositories whose name is most similar to *target*
        (ties broken by star count).  Accepted hits are also stored in OSSDB.

        NOTE(review): the original comment claimed non-fork repos only, but
        the query string does not pass ``fork:false`` — confirm intent.

        :param target: dependency name to look up
        :return: list of {'name', 'url', 'source': 'github'} dicts
        """

        def process_results(projects):
            ossaddress = []
            # (name, similarity, url, stars) for every candidate repository.
            scores = [(project['name'],
                       fuzz.ratio(project['name'], target),
                       project['html_url'],
                       project['stargazers_count'])
                      for project in projects]
            # Keep candidates at or above the similarity threshold.
            hits = [entry for entry in scores if entry[1] >= similar_threshold]
            if not hits:
                return []
            # Best similarity first; star count breaks ties.
            hits.sort(key=lambda x: (x[1], x[3]), reverse=True)
            max_similarity = hits[0][1]
            max_stars = hits[0][3]
            best = [hit for hit in hits
                    if hit[1] == max_similarity and hit[3] == max_stars]
            print(f"最高分且高星：{best}")
            for oss_name, _score, url, _stars in best:
                # Persist the discovery so future lookups hit OSSDB locally.
                self.ossdb.add_meta({
                    'oss_name': oss_name,
                    'oss_source': "github",
                    'upstream_url': url.lower(),
                    'git_upstream': True,
                    'search_count': 1
                })
                ossaddress.append({
                    'name': oss_name,
                    'url': url.lower(),
                    'source': "github"
                })
            return ossaddress

        def get_next_page_link(headers):
            # Parse the RFC 5988 'Link' header for the rel="next" URL.
            link_header = headers.get('Link')
            if link_header:
                for link in link_header.split(', '):
                    if 'rel="next"' in link:
                        return link.split('; ')[0][1:-1]  # strip '<' and '>'
            return None

        token = configure.CONFIG['githubtoken']
        headers = {
            'Authorization': f'Token {token}',
            'Accept': 'application/vnd.github.v3+json'
        }
        # Only C/C++ projects, sorted by stars, 100 per page.
        url = f"https://api.github.com/search/repositories?q={target}+language%3AC%2B%2B+language%3AC&sort=stars&order=desc&per_page=100"
        print(f"第一页：{url}")
        response = get_website(url, headers)
        if response is None:
            return []
        projects = response.json()['items']
        next_page_link = get_next_page_link(response.headers)
        while next_page_link:
            print(f"下一页链接：{next_page_link}")
            response = get_website(next_page_link, headers=headers)
            if response is None:
                # Network failure mid-pagination: fall back to what we have
                # (the original crashed on response.json() here).
                break
            projects.extend(response.json()['items'])
            next_page_link = get_next_page_link(response.headers)
        return process_results(projects)

    def search_from_gitlab(self, target):
        """Search GitLab for projects matching *target* (C/C++ filter).

        Pages through the API (capped at ~100 extra pages), keeps the projects
        whose name is most similar to *target* (ties broken by star count) and
        stores accepted hits in OSSDB.

        :param target: dependency name to look up
        :return: list of {'name', 'url', 'source': 'gitlab'} dicts
        """

        def process_results(projects):
            gitlab_address = []
            # (name, similarity, url, stars) for every candidate project.
            scores = [(project['name'],
                       fuzz.ratio(project['name'], target),
                       project['web_url'],
                       project['star_count'])
                      for project in projects]
            hits = [entry for entry in scores if entry[1] >= similar_threshold]
            if not hits:
                return []
            # Best similarity first; star count breaks ties.
            hits.sort(key=lambda x: (x[1], x[3]), reverse=True)
            max_similarity = hits[0][1]
            max_stars = hits[0][3]
            best = [hit for hit in hits
                    if hit[1] == max_similarity and hit[3] == max_stars]
            print(f"最高得分且高星：{best}")
            for oss_name, _score, url, _stars in best:
                # Persist the discovery so future lookups hit OSSDB locally.
                self.ossdb.add_meta({
                    'oss_name': oss_name,
                    'oss_source': "gitlab",
                    'upstream_url': url.lower(),
                    'git_upstream': True,
                    'search_count': 1
                })
                gitlab_address.append({
                    'name': oss_name,
                    'url': url.lower(),
                    'source': "gitlab"
                })
            return gitlab_address

        def get_next_page_link(headers):
            # Parse the RFC 5988 'Link' header for the rel="next" URL.
            link_header = headers.get('Link')
            if link_header:
                for link in link_header.split(', '):
                    if 'rel="next"' in link:
                        return link.split('; ')[0][1:-1]  # strip '<' and '>'
            return None

        languages = 'C,C++'  # comma-separated language filter
        gitlab_url = f"https://gitlab.com/api/v4/projects?search={target}&languages={languages}&per_page=100"
        headers = {
            'Private-Token': configure.CONFIG['gitlabtoken']  # personal access token
        }
        count = 0
        print(f"第一页：{gitlab_url}")
        response = get_website(gitlab_url, headers=headers)
        # get_website may return None on failure; the original dereferenced
        # .status_code unconditionally and crashed in that case.
        if response is None or response.status_code != 200:
            return []
        projects = response.json()
        next_page_link = get_next_page_link(response.headers)
        while next_page_link:
            if count > 100:  # hard cap on pagination depth
                break
            print(f"下一页链接：{next_page_link}")
            response = get_website(next_page_link, headers=headers)
            if response is None:
                break  # network failure mid-pagination: use what we have
            projects.extend(response.json())
            next_page_link = get_next_page_link(response.headers)
            count += 1
        return process_results(projects)

    def search_from_gitee(self, target):
        """Search Gitee for C/C++ repositories matching *target*.

        Pages through the API using the 'total_page' response header, keeps
        the repositories whose path is most similar to *target* (ties broken
        by star count) and stores accepted hits in OSSDB.

        :param target: dependency name to look up
        :return: list of {'name', 'url', 'source': 'gitee'} dicts
        """

        def strip_git_suffix(url):
            # Gitee html_url values end in '.git'; drop that suffix so urls
            # match the other sources.  The original used rstrip(".git"),
            # which strips the character set {., g, i, t} and corrupts repo
            # names such as 'commit' ('.../commit.git' -> '.../comm').
            lowered = url.lower()
            if lowered.endswith(".git"):
                return lowered[:-len(".git")]
            return lowered

        def process_results(projects):
            ossaddress = []
            # Gitee's 'name' may be a Chinese display name, so compare on
            # 'path' (the last URL segment), which acts as the repo name.
            scores = [(project['path'],
                       fuzz.ratio(project['path'], target),
                       project['html_url'],
                       project['stargazers_count'])
                      for project in projects]
            hits = [entry for entry in scores if entry[1] >= similar_threshold]
            if not hits:
                return []
            # Best similarity first; star count breaks ties.
            hits.sort(key=lambda x: (x[1], x[3]), reverse=True)
            max_similarity = hits[0][1]
            max_stars = hits[0][3]
            best = [hit for hit in hits
                    if hit[1] == max_similarity and hit[3] == max_stars]
            print(f"最高分且高星：{best}")
            for oss_name, _score, url, _stars in best:
                clean_url = strip_git_suffix(url)
                # Persist the discovery so future lookups hit OSSDB locally.
                self.ossdb.add_meta({
                    'oss_name': oss_name,
                    'oss_source': "gitee",
                    'upstream_url': clean_url,
                    'git_upstream': True,
                    'search_count': 1
                })
                ossaddress.append({
                    'name': oss_name,
                    'url': clean_url,
                    'source': "gitee"
                })
            return ossaddress

        def get_total_page(headers):
            # Gitee reports pagination via a 'total_page' header (string).
            return headers.get('total_page')

        token = configure.CONFIG['giteetoken']
        page = 1
        # Only non-fork C/C++ projects, sorted by stars, 100 per page.
        url = f"https://gitee.com/api/v5/search/repositories?access_token={token}&q={target}&page={page}&per_page=100&fork=false&language=C%2FC%2B%2B&sort=stars_count&order=desc"
        print(f"第一页：{url}")
        response = get_website(url=url)
        print(response)
        if response is None:
            return []
        projects = response.json()
        total_page = get_total_page(response.headers)
        # Guard against a missing header: int(None) raised in the original.
        while total_page is not None and page < int(total_page):
            page += 1
            next_url = f"https://gitee.com/api/v5/search/repositories?access_token={token}&q={target}&page={page}&per_page=100&fork=false&language=C%2FC%2B%2B&sort=stars_count&order=desc"
            response = get_website(next_url)
            if response is None:
                break  # network failure mid-pagination: use what we have
            projects.extend(response.json())
        return process_results(projects)

    def search_main(self, target):
        """Resolve *target*: local OSSDB first, then the online forges.

        :param target: dependency name to resolve
        :return: list of {'name', 'url', 'source'} dicts (possibly empty)
        """
        result = self.search_from_ossdb(target)
        print(f"ossdb:{result}")
        if not result:  # not found locally: fan out to github/gitlab/gitee
            result = self.search_from_github(target)
            print(f"github:{result}")
            result_gitlab = self.search_from_gitlab(target)
            # Fixed: the original logged ``result`` here instead of the
            # gitlab result.
            print(f"gitlab:{result_gitlab}")
            result.extend(result_gitlab)
            result_gitee = self.search_from_gitee(target)
            print(f"gitee:{result_gitee}")
            result.extend(result_gitee)
            print(f"最终结果：{result}")
        return result

    def main(self):
        """Single-threaded driver.

        Resolves every cleaned scan dependency, appends new addresses to
        ``alloss_path``, records unresolvable names in ``error_dep_path``,
        then merges the Centris results.
        """
        logger.info(f"[+] find deps URL address")
        self.error_deps = read_jl(self.error_dep_path)
        for oss in read_jl(self.alloss_path):
            self.exist_oss.append(oss['url'])
        with tqdm(total=len(self.all_deps)) as pbar:
            pbar.set_description(f"find deps url address:")
            for dep in self.all_deps:
                try:
                    print(f"scanner依赖项：{dep['name']}")
                    datas = self.search_main(dep['name'])
                    print(f"最终结果：{datas}")
                    if datas:
                        for data in datas:
                            if data['url'] not in self.exist_oss:
                                self.exist_oss.append(data['url'])
                                write_append_jl(self.alloss_path, data)
                    elif dep not in self.error_deps:
                        write_append_jl(self.error_dep_path, dep)
                        # Remember it so the same dep is not written twice
                        # within one run (the original could duplicate).
                        self.error_deps.append(dep)
                    pbar.update()
                except Exception:
                    logger.error('[+]  An error occurred', exc_info=True)
                    traceback.print_exc()
                    continue
        # After the search pass, merge in the Centris results (previously
        # duplicated inline here).
        self.merge_Centris_result()

    def main_find(self, idx, list_arr):
        """Worker for one thread: resolve the deps in *list_arr*.

        :param idx: 1-based thread index (used in the progress-bar label)
        :param list_arr: slice of ``all_deps`` assigned to this thread
        """
        with tqdm(total=len(list_arr)) as pbar:
            pbar.set_description(f"thread {idx}find deps url address:")
            for dep in list_arr:
                try:
                    print(f"scanner依赖项：{dep['name']}")
                    datas = self.search_main(dep['name'])
                    print(f"最终结果：{datas}")
                    if datas:
                        for data in datas:
                            if data['url'] not in self.exist_oss:
                                self.exist_oss.append(data['url'])
                                write_append_jl(self.alloss_path, data)
                    elif dep not in self.error_deps:
                        write_append_jl(self.error_dep_path, dep)
                        # Remember it so the same dep is not written twice.
                        self.error_deps.append(dep)
                    pbar.update()
                except Exception:
                    logger.error('[+]  An error occurred', exc_info=True)
                    traceback.print_exc()
                    continue

    def split_list(self, list, n):
        """Yield *list* in n roughly equal consecutive chunks.

        :param list: full list to split (name kept for interface
            compatibility although it shadows the builtin)
        :param n: number of chunks, i.e. number of worker threads
        :return: generator of sub-lists
        """
        step_count = math.ceil(len(list) / n)
        for start in range(0, len(list), step_count):
            yield list[start:min(start + step_count, len(list))]

    def main_multi(self, num):
        """Multi-threaded driver: split ``all_deps`` across *num* threads.

        :param num: number of worker threads
        """
        logger.info(f"[+] find deps URL address")
        self.error_deps = read_jl(self.error_dep_path)
        for oss in read_jl(self.alloss_path):
            self.exist_oss.append(oss['url'])
        if not self.all_deps:
            print("没有需要处理的dep")
            return
        print(len(self.all_deps))
        threads = []
        for _idx, list_arr in enumerate(self.split_list(self.all_deps, num)):
            print(f"thread:{_idx + 1} do:{len(list_arr)}")
            threads.append(threading.Thread(target=self.main_find,
                                            args=(_idx + 1, list_arr)))
        for worker in threads:
            worker.start()
        for worker in threads:
            worker.join()  # wait for all workers before the main thread continues

    def merge_Centris_result(self):
        """Append the Centris results to ``alloss_path`` (deduplicated by url)."""
        # os.path.join replaces the Windows-only raw '\data_out\...' path.
        centris_oss = read_jl(os.path.join(g_cur_dir, 'data_out', 'centris_oss.jl'))
        for oss in centris_oss:
            if oss['url'] not in self.exist_oss:
                self.exist_oss.append(oss['url'])
                write_append_jl(self.alloss_path, oss)

    def search_from_ossdb_either(self):
        """Collect the remaining OSS-index entries from non-git-forge sources.

        Every OSSDB record whose source is not github/gitlab/gitee is written
        to ``alloss_path`` unless its url was already recorded.
        """
        results = self.ossdb.get_data(
            {"oss_source": {"$nin": ["github", "gitlab", "gitee"]}}, {})
        print(f"找到{len(results)}个")
        for info in tqdm(results, total=len(results), desc="处理进度"):
            data = {
                'name': info['oss_name'],
                'url': info['upstream_url'].lower(),
                'source': info['oss_source']
            }
            if data['url'] not in self.exist_oss:
                self.exist_oss.append(data['url'])
                write_append_jl(self.alloss_path, data)

if __name__ == "__main__":
    # Standalone entry point: sweep the OSS index for entries that did not
    # come from one of the git forges.
    finder = FindDepsAddress()
    finder.search_from_ossdb_either()