#!/usr/bin/env python3
import re
import csv
import os

def extract_urls_from_searchres(repo_dir):
    """
    Parse a VS Code code-search result dump and write (repository, URL)
    pairs to repo_urls.csv.

    Parameters:
        repo_dir: unused; kept for interface compatibility with callers.
    """
    # BUGFIX: open with an explicit encoding, consistent with the rest of
    # this file (the platform default could garble non-ASCII content).
    with open('_https_git_zip_tar_gz_tar_bz2_7z_rar_xz_gz_bz2_.code-search.txt', 'r', encoding='utf-8') as file:
        content = file.read()

    # A header line in the search results names the repository.
    repo_pattern = r'build_tools/output/repo/([^/]+)/CMakeLists\.txt:'
    # URL lines: a fetch keyword followed by an https URL.  The optional
    # extension suffix group never strips anything because the greedy
    # [^\s]+ capture already consumes the extension.
    url_pattern = r'(?:GIT_REPOSITORY|URL|VCS_URL)\s+(https://[^\s]+)(?:\.git|\.zip|\.tar\.gz|\.tar\.bz2|\.7z|\.rar|\.xz|\.gz|\.bz2)?'

    # Map repository name -> ordered list of unique URLs.
    repos = {}
    current_repo = None

    for line in content.split('\n'):
        # A repo header switches the "current" repository context.
        repo_match = re.search(repo_pattern, line)
        if repo_match:
            current_repo = repo_match.group(1)
            repos.setdefault(current_repo, [])
            continue

        # URL lines are attributed to the most recent repo header.
        url_match = re.search(url_pattern, line)
        if url_match and current_repo:
            url = url_match.group(1)
            if url not in repos[current_repo]:
                repos[current_repo].append(url)

    # Unquoted CSV output; backslash escapes any delimiter inside fields.
    with open('repo_urls.csv', 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile, quoting=csv.QUOTE_NONE, escapechar='\\')
        writer.writerow(['Repository', 'URL'])
        for repo, urls in repos.items():
            for url in urls:
                writer.writerow([repo, url])

    print("CSV文件已生成：repo_urls.csv")



def extract_urls_from_cmakelists(repo_dir):
    """
    Extract https URLs from the CMakeLists.txt inside a repository directory.

    Parameters:
        repo_dir: path to the repository directory.

    Returns:
        (repo_name, urls): the directory's basename and the list of https
        URLs found (empty when the repo is skipped or has no CMakeLists.txt).
    """
    repo_name = os.path.basename(repo_dir)

    # Repositories whose CMakeLists.txt should not be scanned.
    skip_repos = [
        "ompl",
        "fastrtps",
        "septentrio-gnss-driver"
    ]
    if repo_name in skip_repos:
        print(f"跳过仓库: {repo_name}")
        return repo_name, []

    cmakelists_path = os.path.join(repo_dir, "CMakeLists.txt")
    urls = []

    if not os.path.exists(cmakelists_path):
        return repo_name, urls

    # Match https URLs on non-comment lines, excluding metadata and
    # message/compile-option lines.
    url_pattern = re.compile(r'^(?!\s*#.*https:)(?!.*\bPROJECT_URL\b)(?!.*\bHOMEPAGE_URL\b)(?!.*\bMESSAGE\()(?!.*\bmessage\b)(?!.*\badd_compile_options\b)(?!.*# skip for)(?!.*or install).*\b(https:.*)')

    try:
        with open(cmakelists_path, 'r', encoding='utf-8') as f:
            for line in f:
                if not url_pattern.search(line):
                    continue
                url_start = line.find('https:')
                if url_start == -1:
                    continue
                # The URL ends at the first delimiter after it.  '\n' and
                # '\t' are included in the delimiter set (BUGFIX: the old
                # code omitted '\n', so a URL at end-of-line kept its
                # trailing newline, since lines from file iteration end
                # with '\n' and the fallback end was len(line)).
                stops = [line.find(ch, url_start) for ch in (' ', '\t', '"', "'", ')', '>', '\n')]
                url_end = min((pos for pos in stops if pos != -1), default=len(line))
                urls.append(line[url_start:url_end])
    except Exception as e:
        # Best-effort: report the failure and return whatever was collected.
        print(f"处理 {cmakelists_path} 时出错: {e}")

    return repo_name, urls

def remove_duplicates():
    """
    Rewrite repo_urls.csv keeping only the first occurrence of each
    (repository name, URL) pair.  Rows with fewer than two columns are
    dropped.
    """
    unique_rows = []
    seen_pairs = set()  # (repo, url) pairs already emitted

    with open('repo_urls.csv', 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)  # preserve the header row
        for record in reader:
            if len(record) < 2:
                continue
            pair = (record[0], record[1])
            if pair in seen_pairs:
                continue
            seen_pairs.add(pair)
            unique_rows.append(record)

    # Rewrite the file in place with the surviving rows.
    with open('repo_urls.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(unique_rows)

    print("已完成仓库名和URL去重")

def split_by_archive_type():
    """
    Split the rows of repo_urls.csv into three CSV files by URL type:
    archive_urls.csv (known archive/blob extensions), git_urls.csv
    (URLs ending in .git), and other_urls.csv (everything else).
    """
    # Extensions treated as downloadable archives/blobs.
    archive_extensions = ('.zip', '.tar.gz', '.tar.bz2', '.7z', '.rar', '.xz', '.gz', '.bz2','.blob','.run','.cmake')

    archive_rows = []
    git_rows = []
    others_rows = []

    with open('repo_urls.csv', 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)  # reused as the header of every output file

        for row in reader:
            if len(row) >= 2:
                url = row[1]
                # Classify by (case-insensitive) URL suffix.
                is_archive = any(url.lower().endswith(ext) for ext in archive_extensions)

                if is_archive:
                    archive_rows.append(row)
                elif url.lower().endswith('.git'):
                    git_rows.append(row)
                else:
                    others_rows.append(row)

    # One output file per category, each prefixed with the original header.
    with open('archive_urls.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(archive_rows)

    with open('git_urls.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(git_rows)

    with open('other_urls.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(others_rows)

    print("已将URL分类完成：")
    print(f"压缩包URL数量：{len(archive_rows)}")
    # BUGFIX: this line previously reused the "非压缩包URL数量" label,
    # printing the same label for both the git count and the others count.
    print(f"GitURL数量：{len(git_rows)}")
    print(f"非压缩包URL数量：{len(others_rows)}")
    
def main():
    """
    Entry point: walk build_tools/output/repo, pull URLs from each repo's
    CMakeLists.txt, write repo_urls.csv, then clean, dedupe, and split it.
    """
    base_dir = "build_tools/output/repo"
    output_file = "repo_urls.csv"

    # Nothing to do when the repo tree is missing.
    if not os.path.exists(base_dir):
        print(f"错误: 目录 {base_dir} 不存在")
        return

    results = []

    # One row per (repo, url); repos without URLs are deliberately omitted.
    for entry in os.listdir(base_dir):
        entry_path = os.path.join(base_dir, entry)
        if not os.path.isdir(entry_path):
            continue
        repo, found_urls = extract_urls_from_cmakelists(entry_path)
        results.extend([repo, one_url] for one_url in found_urls)

    # Unquoted CSV; backslash escapes any delimiter inside fields.
    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f, quoting=csv.QUOTE_NONE, escapechar='\\')
        writer.writerow(["仓库名", "URL"])
        writer.writerows(results)

    print(f"已将URL信息写入 {output_file}")

    del_blank_lines()          # drop empty rows
    remove_trailing_slash()    # normalize URL endings
    remove_duplicates()        # dedupe (repo, URL) pairs
    split_by_archive_type()    # archive / git / other CSVs

    print("已完成所有处理")

def del_blank_lines():
    """
    Rewrite repo_urls.csv keeping only rows that contain at least one
    non-blank field.
    """
    with open('repo_urls.csv', 'r', encoding='utf-8') as src:
        # A row survives if any of its cells has non-whitespace content.
        kept = [record for record in csv.reader(src)
                if any(cell.strip() for cell in record)]

    with open('repo_urls.csv', 'w', newline='', encoding='utf-8') as dst:
        csv.writer(dst).writerows(kept)

    print("已删除CSV文件中的空行")

def remove_trailing_slash():
    """
    Strip at most one trailing forward slash, then at most one trailing
    backslash, from the URL column (second field) of repo_urls.csv.
    """
    with open('repo_urls.csv', 'r', encoding='utf-8') as src:
        records = list(csv.reader(src))

    for record in records:
        if len(record) < 2:  # leave short rows untouched
            continue
        url = record[1]
        # Order matters: forward slash first, then backslash,
        # mirroring the original cleanup sequence.
        if url.endswith('/'):
            url = url[:-1]
        if url.endswith('\\'):
            url = url[:-1]
        record[1] = url

    with open('repo_urls.csv', 'w', newline='', encoding='utf-8') as dst:
        csv.writer(dst).writerows(records)

    print("已删除URL尾部的斜杠")
    
if __name__ == "__main__":
    main()
    # del_blank_lines()  # moved into main()
    # remove_trailing_slash()  # moved into main()

