# -*- coding: utf-8 -*-
# @Time    : 2025-08-08 16:16:50
# @Author  : zhangbh
# @Email   : 526059967@qq.com
# @File    : FindApiFromGits.py
# @Software: PyCharm
import json
import os
import re

import pandas as pd
import portalocker
from multiprocessing import Pool
from urllib.parse import quote

import gitlab
import requests
from lxml import etree
from tabulate import tabulate

# GitLab configuration
token = ''  # replace with your GitLab access token
gitlab_url = ''  # replace with your GitLab base URL
gl = gitlab.Gitlab(gitlab_url, token)

# Personal key/cookie issued to your own account (from the profile page)
private_Cookie = ''

headers = {
    'Cookie': private_Cookie
}

# Annotations that identify a Java controller class or endpoint method
controller_patterns = [
    r'@RestController',
    r'@ShareRest',
    r'@Controller',
    r'@FeignClient',
    r'@RequestMapping',
    r'@GetMapping',
    r'@PostMapping',
    r'@PutMapping',
    r'@DeleteMapping',
    r'@PatchMapping'
]

# Patterns capturing the quoted API path of a mapping annotation
api_path_patterns = [
    r'@RequestMapping\s*\(\s*["\']([^"\']+)["\']',
    r'@GetMapping\s*\(\s*["\']([^"\']+)["\']',
    r'@PostMapping\s*\(\s*["\']([^"\']+)["\']',
    r'@PutMapping\s*\(\s*["\']([^"\']+)["\']',
    r'@DeleteMapping\s*\(\s*["\']([^"\']+)["\']',
    r'@PatchMapping\s*\(\s*["\']([^"\']+)["\']'
]

# Looser variants (quotes optional) to handle unquoted annotation values
api_path_patterns_loose = [
    r'@RequestMapping\s*\(\s*["\']?([^"\']+)["\']?\s*\)',
    r'@GetMapping\s*\(\s*["\']?([^"\']+)["\']?\s*\)',
    r'@PostMapping\s*\(\s*["\']?([^"\']+)["\']?\s*\)',
    r'@PutMapping\s*\(\s*["\']?([^"\']+)["\']?\s*\)',
    r'@DeleteMapping\s*\(\s*["\']?([^"\']+)["\']?\s*\)',
    r'@PatchMapping\s*\(\s*["\']?([^"\']+)["\']?\s*\)'
]

# Patterns used to extract human-readable endpoint descriptions
comment_patterns = [
    r'/\*\*([^*]|\*(?!/))*\*/',  # multi-line javadoc comment
    r'//\s*(.+)',  # single-line comment
    r'@ApiOperation\s*\(\s*["\']([^"\']+)["\']',  # Swagger annotation
    r'@Api\s*\(\s*["\']([^"\']+)["\']'  # Swagger annotation
]


def get_all_projects():
    """Fetch every project visible to the token, including its description.

    Iterates all groups and their projects, resolves the full project record
    for path/url/description, caches the list to ``all_projects.json`` and
    returns it as a list of dicts.
    """
    collected = []
    try:
        for group in gl.groups.list(all=True):
            for proj in group.projects.list(all=True):
                # A second fetch is needed: group project listings omit details.
                detail = gl.projects.get(proj.id)
                collected.append({
                    "id": proj.id,
                    "group": group.name,
                    "project": proj.name,
                    "path": detail.path,
                    "web_url": detail.web_url,
                    "desc": detail.description if hasattr(detail, 'description') else ''
                })
    except Exception as exc:
        print(f"获取项目列表时出错: {exc}")

    # Cache the listing so subsequent runs can skip the API walk.
    with open('all_projects.json', 'w', encoding='utf-8') as fp:
        json.dump(collected, fp, indent=4, ensure_ascii=False)

    return collected


def search_java_files(project_id):
    """Walk one project's default branch and collect candidate Java files.

    Returns a list of file descriptors (path/filename/ref); an empty list
    when the project cannot be read.
    """
    try:
        proj = gl.projects.get(project_id)
        print(f"开始递归遍历项目 {project_id} 的文件树查找Java文件...")
        branch = proj.default_branch
        print(f"默认分支: {branch}")
        found = find_java_files_recursive(proj, '', branch)
        print(f"项目 {project_id} 最终找到 {len(found)} 个Java文件")
        return found
    except Exception as exc:
        print(f"递归遍历项目 {project_id} 的Java文件时出错: {exc}")
        return []

def find_java_files_recursive(project, path, ref):
    """Recursively collect Java source files that look like API code.

    Walks the repository tree under *path* on branch *ref* (paginated, no
    depth limit) and returns a list of dicts with ``path``, ``filename``
    and ``ref`` for every ``*.java`` blob whose name contains "Controller"
    or "Api". Errors on a subtree are logged and the partial result is kept.
    """
    java_files = []
    page = 1
    per_page = 100
    while True:
        try:
            items = project.repository_tree(path=path, ref=ref, per_page=per_page, page=page)
        except Exception as e:
            print(f"递归查找路径 {path} 时出错: {e}")
            return java_files
        if not items:
            break
        for item in items:
            if item['type'] == 'tree':  # directory: recurse into it
                java_files.extend(find_java_files_recursive(project, item['path'], ref))
            elif (item['type'] == 'blob'
                  and item['name'].endswith('.java')  # skip e.g. ApiSpec.yaml / README
                  and ('Controller' in item['name'] or 'Api' in item['name'])):
                java_files.append({
                    'path': item['path'],
                    'filename': item['name'],
                    'ref': ref
                })
        if len(items) < per_page:  # short page means we reached the end
            break
        page += 1
    return java_files


def get_file_content(project_id, file_path, ref='main'):
    """Fetch one repository file and return its text.

    Returns an empty string when the project or file cannot be read.
    """
    try:
        proj = gl.projects.get(project_id)
        blob = proj.files.get(file_path, ref=ref)
        # The API returns base64; decode() yields bytes, then decode to text.
        text = blob.decode().decode('utf-8')
        print(f"成功读取文件 {file_path}，内容长度: {len(text)} 字符")
        return text
    except Exception as exc:
        print(f"获取文件 {file_path} 内容时出错: {exc}")
        return ""


def _normalize_class_path(path):
    """Normalize a class-level mapping path: leading '/', no trailing '/'."""
    if not path.startswith("/"):
        path = "/" + path
    if path != "/" and path.endswith("/"):
        path = path.rstrip("/")
    return path


def _find_class_level_path(lines):
    """Return the first quoted @RequestMapping value in *lines*, normalized, or ''."""
    for raw in lines:
        match = re.search(r'@RequestMapping\s*\(\s*["\']([^"\']+)["\']', raw.strip())
        if match:
            return _normalize_class_path(match.group(1))
    return ""


def _lookup_comment(lines, index):
    """Best-effort description for the endpoint declared at lines[index].

    Prefers a javadoc block found within 15 lines above (joined and stripped
    of markers), then a single-line // comment within 5 lines above, and
    finally the placeholder "无说明".
    """
    for j in range(index - 1, max(0, index - 15), -1):
        stripped = lines[j].strip()
        if stripped.startswith("/**"):
            parts = [stripped]
            for k in range(j + 1, min(len(lines), j + 10)):
                piece = lines[k].strip()
                parts.append(piece)
                if piece.endswith("*/"):
                    break
            text = re.sub(r"/\*\*|\*/", "", " ".join(parts))
            text = re.sub(r"\*", "", text).strip()
            if text:
                return text
            break  # empty javadoc: fall through to single-line comment search

    for j in range(index - 1, max(0, index - 5), -1):
        stripped = lines[j].strip()
        if stripped.startswith("//"):
            return stripped.lstrip("/").strip()

    return "无说明"


def extract_api_info(content, file_path):
    """Extract REST API endpoints from one Java source file.

    Returns a list of dicts with keys ``class``, ``method``, ``url`` and
    ``comment``. Files that carry none of the controller annotations are
    skipped. Method paths are combined with the class-level @RequestMapping
    path when present.
    """
    apis = []

    if not any(re.search(pattern, content) for pattern in controller_patterns):
        print(f"文件 {file_path} 不是Controller类，跳过")
        return apis

    print(f"文件 {file_path} 是Controller类，开始提取API")

    lines = content.split('\n')
    current_class = ""
    pending_controller = False
    class_level_path = _find_class_level_path(lines)

    for i, raw in enumerate(lines):
        line = raw.strip()

        # Remember a controller annotation until the class declaration shows up.
        if '@RestController' in line or '@Controller' in line or '@FeignClient' in line:
            pending_controller = True
        class_match = re.search(r'(public\s+)?(abstract\s+)?(class|interface)\s+(\w+)', line)
        if class_match and pending_controller:
            current_class = class_match.group(4)
            print(f"找到Controller类: {current_class}")
            pending_controller = False

        # First matching pattern wins: a quoted annotation matches both the
        # strict and the loose variant, which previously emitted the same
        # API twice per line.
        for pattern in api_path_patterns + api_path_patterns_loose:
            path_match = re.search(pattern, line)
            if not path_match:
                continue

            method_match = re.search(r'(\w+)\s*\([^)]*\)\s*{?', line)
            if not method_match:
                break  # no callable name on this line; other patterns won't find one either
            current_method = method_match.group(1)
            method_path = path_match.group(1)

            if class_level_path:
                clean_method_path = method_path.lstrip("/")
                clean_class_path = class_level_path.lstrip("/")

                # A "method" path equal to the class path is the class-level
                # annotation itself (avoids /insure/insure style duplicates).
                if clean_method_path == clean_class_path:
                    print(f"跳过重复路径API: {current_method} -> {method_path}")
                    break

                if clean_method_path.startswith(clean_class_path):
                    # Method path already embeds the class prefix.
                    url_path = "/" + clean_method_path
                else:
                    url_path = class_level_path.rstrip("/") + "/" + clean_method_path
            else:
                url_path = method_path

            if not url_path.startswith("/"):
                url_path = "/" + url_path

            print(f"找到API: {current_method} -> {url_path}")

            apis.append({
                'class': current_class,
                'method': current_method,
                'url': url_path,
                'comment': _lookup_comment(lines, i).strip()
            })
            break

    print(f"文件 {file_path} 提取到 {len(apis)} 个API")
    return apis


def load_json_projects(completed_file):
    """Load the completed-project list from *completed_file*.

    Returns the parsed list of dicts, or [] when the file is missing
    or unreadable.
    """
    if not os.path.isfile(completed_file):
        return []
    try:
        with open(completed_file, 'r', encoding='utf-8') as fp:
            return json.load(fp)
    except Exception:
        return []

def save_completed_project(completed_file, project_info):
    """Record *project_info* in the completed-projects JSON file.

    Multiprocess-safe: the file is opened append-mode, locked exclusively
    with portalocker, read back, deduplicated by project id, and rewritten
    in place. Stores id, path, project, web_url and desc.
    """
    with open(completed_file, 'a+', encoding='utf-8') as f:
        try:
            # Block until we hold an exclusive lock.
            portalocker.lock(f, portalocker.LOCK_EX)

            # 'a+' positions at EOF; rewind to read the current contents.
            f.seek(0)
            try:
                completed = json.load(f)
            except Exception:
                # Empty or corrupt file: start a fresh list.
                completed = []

            print(f"完成的项目数量 {len(completed)} 当前项目{project_info['path']}")
            # Deduplicate by project id before appending.
            if not any(item['id'] == project_info['id'] for item in completed):
                completed.append({
                    'id': project_info['id'],
                    'path': project_info['path'],
                    'project': project_info.get('project', ''),
                    'web_url': project_info.get('web_url', ''),
                    'desc': project_info.get('desc', '')
                })

                # Rewrite the whole file with the updated list.
                f.seek(0)
                f.truncate()
                json.dump(completed, f, ensure_ascii=False, indent=2)
        finally:
            try:
                portalocker.unlock(f)
            except Exception:
                # Was a bare except; never swallow SystemExit/KeyboardInterrupt.
                pass


def analyze_project(project_info, output_file, completed_file):
    """Scan one project for API endpoints and append them to *output_file*.

    Each discovered API is written immediately as one JSON line, guarded
    by a portalocker file lock so parallel workers do not interleave.
    The project is recorded in *completed_file* when done (even when no
    Java files were found). Returns the number of APIs found.
    """
    project_id = project_info['id']
    project_path = project_info['path']

    print(f"正在分析项目: {project_path}")

    java_files = search_java_files(project_id)

    if not java_files:
        print(f"项目 {project_path} 没有找到Java文件")
        save_completed_project(completed_file, project_info)
        return 0

    project_apis = []
    controller_files = 0

    for java_file in java_files:
        file_path = java_file['path']
        ref = java_file.get('ref', 'main')

        content = get_file_content(project_id, file_path, ref)
        if not content:
            continue

        # Count files carrying a controller annotation (statistics only).
        if any(re.search(pattern, content) for pattern in controller_patterns):
            controller_files += 1

        for api in extract_api_info(content, file_path):
            api_info = {
                'project': project_path,
                'file': file_path,
                'class': api['class'],
                'method': api['method'],
                'url': api['url'],
                'comment': api['comment']
            }
            project_apis.append(api_info)
            # Persist immediately so progress survives worker crashes.
            with open(output_file, 'a', encoding='utf-8') as f:
                try:
                    portalocker.lock(f, portalocker.LOCK_EX)
                    json.dump(api_info, f, ensure_ascii=False)
                    f.write('\n')
                finally:
                    try:
                        portalocker.unlock(f)
                    except Exception:
                        # Was a bare except; don't mask SystemExit.
                        pass

    save_completed_project(completed_file, project_info)
    print(f"项目 {project_path} 分析完成，找到 {controller_files} 个Controller文件，{len(project_apis)} 个API接口")
    return len(project_apis)


def save_results_to_excel(all_apis, completed_file, output_excel_file='api_results.xlsx'):
    """Write results to an Excel workbook with two sheets.

    Sheet 'API结果' holds one row per API from *all_apis*; sheet '项目信息'
    holds the project metadata recorded in *completed_file*. Requires the
    openpyxl engine.
    """
    # Sheet 1: one row per discovered API.
    table_data = [{
        '项目': api['project'],
        '文件路径': api['file'],
        '类名': api['class'],
        '方法': api['method'],
        'URL': api['url'],
        '说明': api['comment']
    } for api in all_apis]
    df_apis = pd.DataFrame(table_data)

    # Sheet 2: project metadata loaded from the completed-projects file.
    completed_projects = []
    if os.path.exists(completed_file):
        try:
            with open(completed_file, 'r', encoding='utf-8') as f:
                completed_projects = json.load(f)
        except Exception as e:
            print(f"读取已完成项目文件时出错: {e}")

    project_data = [{
        'ID': project.get('id', ''),
        '项目路径': project.get('path', ''),
        '项目名称': project.get('project', ''),
        'Web URL': project.get('web_url', ''),
        '描述': project.get('desc', '')
    } for project in completed_projects]
    df_projects = pd.DataFrame(project_data)

    # Write both sheets into one workbook.
    with pd.ExcelWriter(output_excel_file, engine='openpyxl') as writer:
        df_apis.to_excel(writer, sheet_name='API结果', index=False)
        df_projects.to_excel(writer, sheet_name='项目信息', index=False)

    print(f"结果已保存到Excel文件: {output_excel_file}")


def print_api_results(all_apis):
    """Report the number of distinct APIs, deduplicated by (project, url, method)."""
    deduped = {}
    for entry in all_apis:
        # Keep only the first occurrence of each (project, url, method) triple.
        deduped.setdefault((entry['project'], entry['url'], entry['method']), entry)

    print(f"总共找到 {len(deduped)} 个API接口")


def main():
    """Entry point: verify the GitLab connection, enumerate projects, scan
    the unfinished ones in a process pool, then merge, dedupe and export."""
    print("开始搜索Git仓库中的Java Controller API接口...")

    # Verify the GitLab connection before doing any work.
    try:
        print("测试GitLab连接...")
        user = gl.user
        # print(f"当前用户: {user.username}")
        # print(f"用户ID: {user.id}")
        print("GitLab连接成功")
    except Exception as e:
        print(f"GitLab连接失败: {e}")
        return

    # Reuse the cached project listing when present; otherwise fetch it.
    file_path = 'all_projects.json'
    if os.path.isfile(file_path):
        with open(file_path, 'r', encoding='utf-8') as file:
            projects = json.load(file)
    else:
        print("正在获取所有项目列表...")
        projects = get_all_projects()

    print(f"找到 {len(projects)} 个项目")

    output_file = 'api_results_temp.json'
    completed_file = 'completed_projects.json'

    print("开始分析项目，结果将实时保存到文件中...")
    print("-" * 60)

    # Load projects already analysed in a previous run so they can be skipped.
    completed_projects = load_json_projects(completed_file)
    completed_ids = set(item['id'] for item in completed_projects)
    print(f"已完成项目数: {len(completed_ids)}，将跳过这些项目")

    # Debug output: sample entries and the unfinished-project ids.
    if projects:
        print("projects样例:", projects[0])
    if completed_projects:
        print("completed_projects样例:", completed_projects[0])
    unfinished = [p['id'] for p in projects if p['id'] not in completed_ids]
    print("未完成项目ID：", unfinished)
    print("未完成项目数量：", len(unfinished))

    total_apis = 0
    to_scan_projects = [p for p in projects if p['id'] not in completed_ids]
    print(f"本次需分析项目数: {len(to_scan_projects)}")

    # Nothing new to analyse: just reload api_results.json and re-export.
    if len(to_scan_projects) == 0:
        print("没有需要分析的新项目，直接从现有结果文件中读取数据...")
        all_apis = []
        try:
            # Read the previously accumulated results.
            if os.path.exists('api_results.json'):
                with open('api_results.json', 'r', encoding='utf-8') as f:
                    all_apis = json.load(f)
                print(f"从现有结果文件中读取到 {len(all_apis)} 个API接口")
            else:
                print("未找到现有的api_results.json文件")
        except Exception as e:
            print(f"读取现有结果文件时出错: {e}")

        # Report a summary of the results.
        if all_apis:
            print_api_results(all_apis)
            # Export to Excel as well.
            save_results_to_excel(all_apis, completed_file, 'api_results.xlsx')
        else:
            print("未找到任何API接口")

        print(f"\n结果已从现有文件中读取并保存到 api_results.xlsx 文件中")
        return

    # Projects remain: fan them out to worker processes.
    pool = Pool(processes=5)
    results = []

    for project in to_scan_projects:
        result = pool.apply_async(analyze_project, args=(project, output_file, completed_file))
        results.append(result)

    completed_projects_count = len(completed_projects)
    for result in results:
        try:
            api_count = result.get(timeout=300)  # 5-minute cap per project
            total_apis += api_count
            completed_projects_count += 1
            print(f"进度: {completed_projects_count}/{len(projects)} 项目完成，当前找到 {total_apis} 个API接口")
        except Exception as e:
            print(f"分析项目时出错: {e}")
            completed_projects_count += 1

    pool.close()
    pool.join()

    print("-" * 60)
    print(f"所有项目分析完成，总共找到 {total_apis} 个API接口")

    # Collect the JSON-lines temp file, dedupe, merge with prior results, persist.
    all_apis = []
    try:
        with open(output_file, 'r', encoding='utf-8') as f:
            for line in f:
                if line.strip():
                    api_info = json.loads(line.strip())
                    all_apis.append(api_info)
        # Deduplicate by (project, url, method).
        unique_apis = {}
        for api in all_apis:
            key = (api['project'], api['url'], api['method'])
            if key not in unique_apis:
                unique_apis[key] = api
        all_apis = list(unique_apis.values())
        # Only rewrite the final file when this run produced data.
        if all_apis:
            # Merge with results from earlier runs, if any.
            if os.path.exists('api_results.json'):
                with open('api_results.json', 'r', encoding='utf-8') as f:
                    try:
                        existing_apis = json.load(f)
                        # Combine old and new entries, keyed for dedup.
                        all_apis_dict = {}
                        # Existing entries first...
                        for api in existing_apis:
                            key = (api['project'], api['url'], api['method'])
                            all_apis_dict[key] = api
                        # ...then new ones, overwriting duplicates.
                        for api in all_apis:
                            key = (api['project'], api['url'], api['method'])
                            all_apis_dict[key] = api
                        # Back to a flat list.
                        all_apis = list(all_apis_dict.values())
                    except json.JSONDecodeError:
                        # Existing file is corrupt: fall back to new data only.
                        pass
            with open('api_results.json', 'w', encoding='utf-8') as file:
                json.dump(all_apis, file, indent=4, ensure_ascii=False)
        # Truncate the temp file now that it has been absorbed.
        with open(output_file, 'w', encoding='utf-8') as f:
            pass
    except Exception as e:
        print(f"读取结果文件时出错: {e}")

    # Report a summary of the results.
    if all_apis:
        print_api_results(all_apis)
        # Export to Excel as well.
        save_results_to_excel(all_apis, completed_file, 'api_results.xlsx')
    else:
        print("未找到任何API接口")

    print(f"\n结果已保存到 api_results.json 和 api_results.xlsx 文件中")


if __name__ == '__main__':
    main()
