import re
import requests
import base64
import json
import time
import sqlite3
import os
from urllib.parse import urlparse
from typing import List, Tuple, Optional, Dict
from datetime import datetime
from ds_api import call_llm_api
from dotenv import load_dotenv

# Load environment variables from the .env file
load_dotenv()

class ReadmeAnalyzer:
    """Analyze GitHub repositories for installable PyPI packages.

    Downloads README / requirements files through the GitHub contents API,
    then extracts ``pip install`` targets either with regular expressions or
    by delegating the analysis to an LLM (rate limited via a QPM cap).
    """

    # Candidate README file names, probed in order.
    README_CANDIDATES = [
        'README.md', 'readme.md', 'README.rst',
        'readme.rst', 'README.txt', 'README',
        'README.MD',
        'README_zh.md', 'README.en.md', 'README_CN.md'
    ]

    # Candidate requirements file names, probed in order.
    REQUIREMENTS_CANDIDATES = [
        'requirements.txt', 'requirements-dev.txt', 'requirements-test.txt',
        'requirements/prod.txt', 'requirements/dev.txt', 'requirements/test.txt'
    ]

    # Matches a ``pip install`` command and captures the first package name.
    # BUGFIX: the original pattern required no whitespace after "install",
    # so ordinary commands such as "pip install requests" could never match.
    PIP_PATTERN = re.compile(
        r'(?:^|\s|["\'`])'                      # line start, whitespace or quote
        r'(?:pip|python\s+-m\s+pip|py\s+-m\s+pip)\s+install' # pip install
        r'\s+'                                  # whitespace after "install" (BUGFIX)
        r'(?:\\\s*\n\s*)?'                      # optional backslash line continuation
        r'(?:--?\w+(?:\s+[^\s-]+)?\s+)*'        # option flags, possibly with a value
        r'(?:-r\s+\S+\s+)?'                     # skip a requirements-file argument
        r'(?![a-z]+\+https?://)'                # reject VCS installs (e.g. git+https://)
        r"(?:['\"])?"                           # optional opening quote
        r'([a-zA-Z0-9][a-zA-Z0-9\-_\.]*(?:\[[^\]]+\])?)'  # package name (extras allowed)
        r'(?:\s*[=><~!]+\s*[^\s,\'\"`;\]]+)?'   # version constraint
        r'(?:\s*;\s*[^\s]+)?'                   # environment marker
        r"(?:['\"])?",                          # optional closing quote
        re.IGNORECASE | re.MULTILINE            # case-insensitive, per-line anchors
    )

    # Matches one requirements.txt line and captures the package name.
    REQUIREMENTS_PATTERN = re.compile(
        r'^'                                    # line start
        r'\s*'                                  # optional leading whitespace
        r'([a-zA-Z0-9][a-zA-Z0-9\-_\.]*)'      # package name
        r'(?:\s*[=><~!]+\s*[^\s#]+)?'          # version constraint
        r'(?:\s*;\s*[^\s#]+)?'                 # environment marker
        r'(?:\s*#.*)?'                         # trailing comment
        r'\s*$',                               # line end
        re.IGNORECASE | re.MULTILINE           # case-insensitive, per-line anchors
    )

    # Fragments identifying non-PyPI install targets that must be rejected.
    NON_PYPI_PATTERNS = [
        r'https?://',               # HTTP/HTTPS URL
        r'git\+',                   # Git URL
        r'svn\+',                   # SVN URL
        r'hg\+',                    # Mercurial URL
        r'\./',                     # relative local path
        r'/',                       # absolute path
        r'file://',                 # file URL
        r'\.(tar\.gz|zip|whl|txt)', # archives, wheels and text files
        r'@',                       # direct-reference specifier
    ]
    # Compiled union of the fragments above.
    NON_PYPI_PATTERN = re.compile(
        r'(' + '|'.join(NON_PYPI_PATTERNS) + r')',
        re.IGNORECASE
    )

    # Shared LLM rate-limiting state (class-level, spans all calls).
    _last_llm_call_time = 0   # wall-clock timestamp of the last LLM call (0 = never)
    _qpm_interval = 60.0      # default spacing in seconds (1 request per minute)

    @classmethod
    def log(cls, msg: str):
        """Print *msg* prefixed with a timestamp (uniform log format)."""
        print(f"[{time.strftime('%Y-%m-%d %H:%M:%S')}] {msg}")

    @classmethod
    def extract_repo_info(cls, url: str) -> Tuple[Optional[str], Optional[str]]:
        """Parse ``(owner, repo)`` out of a GitHub repository URL.

        Returns (None, None) when the URL has fewer than two path segments.
        """
        parsed = urlparse(url)
        path_segments = parsed.path.strip('/').split('/')
        if len(path_segments) >= 2:
            return path_segments[0], path_segments[1]
        cls.log(f"无法解析仓库信息: {url}")
        return None, None

    @classmethod
    def fetch_file_content(cls, owner: str, repo: str, filename: str, token: str = None) -> Optional[str]:
        """Fetch one file from a repository via the GitHub contents API.

        Returns the decoded UTF-8 text, or None when the file is missing or
        the request fails.
        """
        headers = {'Authorization': f'token {token}'} if token else {}

        try:
            response = requests.get(
                # BUGFIX: the path previously contained the literal placeholder
                # "(unknown)" instead of interpolating the requested filename.
                f"https://api.github.com/repos/{owner}/{repo}/contents/{filename}",
                headers=headers,
                timeout=10
            )
            if response.status_code == 200:
                cls.log(f"{owner}/{repo} 仓库中找到 {filename}")
                # The contents API returns the payload base64-encoded.
                return base64.b64decode(response.json()['content']).decode('utf-8')
            return None
        except Exception as e:
            cls.log(f"抓取 {filename} 出错: {e}")
            return None

    @classmethod
    def fetch_readme(cls, owner: str, repo: str, token: str = None) -> Optional[str]:
        """Return the first README variant found, or None when none exists."""
        for readme_name in cls.README_CANDIDATES:
            content = cls.fetch_file_content(owner, repo, readme_name, token)
            if content:
                return content
        cls.log(f"{owner}/{repo} 未找到README")
        return None

    @classmethod
    def fetch_requirements(cls, owner: str, repo: str, token: str = None) -> Dict[str, str]:
        """Return {filename: content} for every requirements file that exists."""
        requirements_content = {}
        for req_name in cls.REQUIREMENTS_CANDIDATES:
            content = cls.fetch_file_content(owner, repo, req_name, token)
            if content:
                requirements_content[req_name] = content
        return requirements_content

    @classmethod
    def preprocess_text(cls, text: str) -> str:
        """Join backslash-continued lines; comments are deliberately kept."""
        # Merge backslash line continuations into a single space.
        text = re.sub(r'\\\s*\n\s*', ' ', text)

        return text

    @classmethod
    def parse_pip_commands(cls, text: str) -> list:
        """Extract standard PyPI package names from pip install commands in *text*."""
        packages = []

        # Merge continuation lines; comments are kept on purpose — they often
        # contain the relevant install instructions.
        processed_text = cls.preprocess_text(text)

        # Work line by line, skipping lines using non-PyPI install styles.
        lines = processed_text.split('\n')
        for line in lines:
            # Skip requirements files, local/editable installs and URL installs.
            if any(pattern in line.lower() for pattern in ['-r ', 'git+', 'install .', 'install -e', 'http://', 'https://', './', '/']):
                continue

            # Capture package names from the remaining lines.
            for match in cls.PIP_PATTERN.finditer(line):
                package = match.group(1).strip()

                # Keep only plausible PyPI names.
                if (package and len(package) > 1 and
                    not cls.NON_PYPI_PATTERN.search(package)):
                    packages.append(package)

        # De-duplicate while preserving order (case / extras / -_  insensitive).
        unique_packages = []
        seen = set()
        for pkg in packages:
            base_pkg = pkg.split('[')[0].lower().replace('-', '_')
            if base_pkg not in seen:
                unique_packages.append(pkg)
                seen.add(base_pkg)

        cls.log(f"找到 {len(unique_packages)} 个PyPI包: {unique_packages}")
        return unique_packages

    @classmethod
    def parse_requirements(cls, text: str) -> list:
        """Extract standard PyPI package names from requirements.txt content."""
        packages = []

        # Process line by line.
        lines = text.split('\n')
        for line in lines:
            # Skip blank lines and full-line comments.
            line = line.strip()
            if not line or line.startswith('#'):
                continue

            # Skip lines referencing non-PyPI sources (URLs, paths, archives).
            if cls.NON_PYPI_PATTERN.search(line):
                continue

            # Capture the package name.
            match = cls.REQUIREMENTS_PATTERN.match(line)
            if match:
                package = match.group(1).strip()
                if package and len(package) > 1:
                    packages.append(package)

        # De-duplicate while preserving order (case / -_ insensitive).
        unique_packages = []
        seen = set()
        for pkg in packages:
            base_pkg = pkg.lower().replace('-', '_')
            if base_pkg not in seen:
                unique_packages.append(pkg)
                seen.add(base_pkg)

        cls.log(f"从requirements.txt中找到 {len(unique_packages)} 个PyPI包: {unique_packages}")
        return unique_packages

    @classmethod
    def parse_json(cls, raw: str):
        """Leniently parse a (possibly markdown-wrapped) JSON object string.

        Returns the parsed dict, or {} when no JSON object can be recovered.
        """
        if not raw or not isinstance(raw, str):
            return {}

        text = raw.strip()

        # 1. Strip a surrounding markdown code fence.
        text = re.sub(r"^```(json)?", "", text.strip(), flags=re.IGNORECASE)
        text = re.sub(r"```$", "", text.strip())

        # 2. Keep only the first '{' through the last '}'.
        if "{" in text and "}" in text:
            text = text[text.find("{"): text.rfind("}") + 1]
        else:
            # No JSON object structure present at all.
            return {}

        # 3. Parse; on failure repair common mistakes and retry once.
        for attempt in range(2):
            try:
                return json.loads(text)
            except json.JSONDecodeError:
                text = re.sub(r"'", '"', text)        # single -> double quotes
                text = re.sub(r",\s*}", "}", text)   # drop trailing comma in objects
                text = re.sub(r",\s*]", "]", text)   # drop trailing comma in arrays

        # 4. Fallback: give up and return an empty dict.
        return {}

    @classmethod
    def analyze_with_llm(
        cls,
        readme_content: str,
        repo_name: str,
        model_name: str = "deepseek-chat",
        base_url: str = "https://api.deepseek.com",
        qpm: int = 1
    ) -> list:
        """Analyze one README with the LLM; thin wrapper over the batch call."""
        # BUGFIX: arguments were previously passed positionally in the wrong
        # order (qpm landed in model_name, model_name in qpm). Use keywords.
        result = cls.analyze_batch_with_llm(
            [{
                'repo_name': repo_name,
                'readme_content': readme_content
            }],
            model_name=model_name,
            base_url=base_url,
            qpm=qpm,
        )
        return result.get(repo_name, [])

    @classmethod
    def analyze_batch_with_llm(
        cls,
        repo_data_list: list,
        model_name: str = "deepseek-chat",
        base_url: str = "https://api.deepseek.com",
        qpm: int = 1,
    ) -> dict:
        """Batch-analyze several READMEs with the LLM for pip install targets.

        Args:
            repo_data_list: list of {'repo_name': str, 'readme_content': str}.
            model_name: LLM model identifier.
            base_url: API endpoint base URL.
            qpm: maximum requests per minute (throttled via class-level state).

        Returns:
            dict: {repo_name: [packages], ...}; empty dict on any failure.
        """
        # Throttle: derive the minimum spacing between calls from the QPM cap.
        interval = 60.0 / qpm

        # If this is not the first call, wait out the remaining interval.
        if cls._last_llm_call_time > 0:
            current_time = time.time()
            time_since_last_call = current_time - cls._last_llm_call_time
            if time_since_last_call < interval:
                sleep_time = interval - time_since_last_call
                cls.log(f"QPM限制：等待 {sleep_time:.2f} 秒...")
                time.sleep(sleep_time)

        # Assemble the per-repo sections of the prompt.
        repos_section = ""
        for i, repo_data in enumerate(repo_data_list, 1):
            repos_section += f"""=== 仓库 {i}: {repo_data['repo_name']} ==={repo_data['readme_content']}"""

        prompt = f"""
                 System: 你是一个资深 Python 开发工程师，同时也是 PyPI 包管理专家，
                 精通各种 pip install 命令格式（单行、多行、带 extras、带版本约束、使用反斜杠续行等）。
                 你的任务是分析 GitHub 仓库的 README 内容，准确提取其中所有标准 PyPI 包名，
                 忽略非 PyPI 来源（如本地路径、git 仓库、requirements 文件引用等）。
                 请仔细、认真、高质量地完成提取任务，用于生产级分析。

                 User: 请分析以下GitHub仓库的README文档，为每个仓库分别提取所有pip install命令中的标准PyPI包名。
                 
                 请详细、全面、透彻分析以下仓库：
                 {repos_section}
                 
                 请严循以下要求：

                 ** 一、分析目标 **
                 1. 找出每个仓库README中的所有pip install命令，包括单行和多行（使用反斜杠续行）形式。
                 2. 只提取标准PyPI包名，忽略：
                    - 本地路径安装（如 ./package）
                    - Git 仓库安装（如 git+https://...）
                    - requirements文件、环境文件或其它非pip install来源
                 3. 保留包名和可选的extras（如package[extra]）
                 4. 忽略版本号或版本约束，如 ==、>=、<=、~= 等,只保留包名
                 5. 支持多种 pip 命令写法，如：
                      单行标准写法: pip install requests flask numpy
                      带版本号: pip install requests==2.31.0 flask>=2.3.0 numpy<=1.25
                      带extras: pip install flask[async] requests[socks]
                      使用反斜杠续行: pip install \
                                        requests \
                                        flask[async] \
                                        numpy
                      忽略非 PyPI 来源： pip install ./local_package git+https://github.com/user/repo
                      等等
                 6. README 中可能包含注释或 Markdown 代码块，请只提取实际命令中的包名。
                 7. 如果同一包出现多次，只保留一次（去重）。
                 
                 ** 二、输出要求 **
                 1. 请严格按JSON格式返回结果。
                 2. 键为仓库名，值为该仓库提取的 PyPI 包名列表。
                 3. 不要添加出规定JSON返回数据格式外多余文字、解释或 Markdown 代码块。
                 {{
                     "仓库名1": ["package1", "package2[extras]"],
                     "仓库名2": ["package3", "package4"],
                     ...
                 }}

                 ** 三、示例 **
                 输入 README 内容：
                 pip install requests flask[async] numpy==1.25
                 pip install ./local_package git+https://github.com/user/repo

                 输出 JSON 数据：
                 {{
                     "example_repo": ["requests", "flask[async]", "numpy"]
                 }}

                 """

        try:
            # Call the internal LLM API.
            content = call_llm_api(
                prompt,
                model=model_name,
                base_url=base_url,
                verbose=False
            )
            cls.log(f"LLM批量分析结果: {content}")

            try:
                # Clean the response (markdown fences, stray text) and parse.
                # BUGFIX: the cleaned result was previously overwritten by a
                # raw json.loads(content), which defeated the cleanup and made
                # every fenced response fail.
                parsed_result = cls.parse_json(content)

                cls.log(f"LLM批量分析成功，处理了 {len(parsed_result)} 个仓库")
                cls.log(f"处理结果: {parsed_result}")
                # Record the call time and enforce the full interval before
                # the next call can proceed.
                cls._last_llm_call_time = time.time()
                cls.log(f"QPM限制：强制等待完整间隔 {interval:.2f} 秒...")
                time.sleep(interval)

                return parsed_result

            except json.JSONDecodeError as e:
                cls.log(f"解析LLM响应JSON失败: {e}")
                cls.log(f"响应内容: {content}")

                # Even on parse failure, record the time and wait.
                cls._last_llm_call_time = time.time()
                cls.log(f"QPM限制：强制等待完整间隔 {interval:.2f} 秒...")
                time.sleep(interval)
                return {}

        except Exception as e:
            cls.log(f"LLM批量分析出错: {str(e)}")

            # Even on API failure, record the time and wait.
            cls._last_llm_call_time = time.time()
            cls.log(f"QPM限制：强制等待完整间隔 {interval:.2f} 秒...")
            time.sleep(interval)
            return {}

class FileProcessor:
    """ 
    文件处理类，用于批量处理GitHub仓库信息：
    - 支持初始化SQLite数据库
    - 获取仓库元数据
    - 保存仓库信息到数据库
    - 支持批量处理README并提取pip依赖
    - 支持缓存机制和LLM分析
    """
    @staticmethod
    def init_database(db_path: str = "repos.db"):
        """
        初始化SQLite数据库，并创建仓库信息表（repositories）。

        Args:
            db_path (str): 数据库文件路径，默认 'repos.db'。
        """
        # 连接SQLite数据库，如果文件不存在会自动创建
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        
        # 创建表结构（如果不存在则创建）
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS repositories (
                id INTEGER PRIMARY KEY AUTOINCREMENT,         -- 自增主键
                name TEXT NOT NULL,                           -- 仓库名称
                url TEXT NOT NULL UNIQUE,                     -- 仓库URL，唯一
                owner TEXT,                                   -- 仓库所有者
                repo_name TEXT,                               -- 仓库名
                stars INTEGER,                                -- 星标数量
                last_updated TEXT,                            -- 最近更新时间
                description TEXT,                             -- 仓库描述
                language TEXT,                                -- 编程语言
                pip_packages TEXT,                            -- pip包列表（JSON字符串）
                license_name TEXT,                            -- 许可证名称
                size_kb INTEGER,                              -- 仓库大小（KB）
                created_at TEXT,                              -- 仓库创建时间
                processed_at TEXT DEFAULT CURRENT_TIMESTAMP,  -- 处理时间
                UNIQUE(owner, repo_name)                      -- owner+repo_name组合唯一
            )
        ''')
        
        # 提交事务并关闭连接
        conn.commit()
        conn.close()
        print(f"数据库已初始化: {db_path}")
    
    @staticmethod
    def fetch_repo_metadata(owner: str, repo: str, token: str = None) -> dict:
        """
        Retrieve repository metadata (stars, language, license, ...) from GitHub.

        Args:
            owner (str): Repository owner login.
            repo (str): Repository name.
            token (str, optional): GitHub access token to raise the rate limit.

        Returns:
            dict: Selected metadata fields; empty dict on any failure.
        """
        # Authenticate only when a token was supplied.
        request_headers = {'Authorization': f'token {token}'} if token else {}

        try:
            resp = requests.get(
                f"https://api.github.com/repos/{owner}/{repo}",
                headers=request_headers,
                timeout=10
            )

            if resp.status_code != 200:
                # Non-200 responses are reported and treated as "no metadata".
                print(f"获取仓库元数据失败 {owner}/{repo}: HTTP {resp.status_code}")
                return {}

            payload = resp.json()
            license_info = payload.get('license')
            return {
                'stars': payload.get('stargazers_count', 0),
                'last_updated': payload.get('updated_at', ''),
                'description': payload.get('description', ''),
                'language': payload.get('language', ''),
                'license_name': license_info.get('name', '') if license_info else '',
                'size_kb': payload.get('size', 0),
                'created_at': payload.get('created_at', '')
            }

        except Exception as e:
            # Network errors, timeouts and JSON decoding problems all land here.
            print(f"获取仓库元数据出错 {owner}/{repo}: {str(e)}")
            return {}
    
    @staticmethod
    def save_to_database(name: str, url: str, owner: str, repo: str, 
                        packages: list, metadata: dict = None, 
                        db_path: str = "repos.db"):
        """
        将仓库信息保存到SQLite数据库中。
        
        Args:
            name (str): 仓库名称
            url (str): 仓库URL
            owner (str): 仓库所有者
            repo (str): 仓库名
            packages (list): pip依赖包列表
            metadata (dict, optional): 仓库元数据
            db_path (str): 数据库文件路径
        """
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        
        # 准备数据，将pip包列表转换为JSON字符串存储
        packages_str = json.dumps(packages) if packages else '[]'
        processed_at = datetime.now().isoformat()
        
        # 如果没有提供元数据，使用默认值
        if metadata is None:
            metadata = {}
        
        try:
            # 使用INSERT OR REPLACE来处理重复数据
            cursor.execute('''
                INSERT OR REPLACE INTO repositories 
                (name, url, owner, repo_name, stars, last_updated, description, 
                 language, pip_packages, license_name, size_kb, created_at, processed_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                name,
                url,
                owner,
                repo,
                metadata.get('stars', 0),
                metadata.get('last_updated', ''),
                metadata.get('description', ''),
                metadata.get('language', ''),
                packages_str,
                metadata.get('license_name', ''),
                metadata.get('size_kb', 0),
                metadata.get('created_at', ''),
                processed_at
            ))
            
            conn.commit()
            print(f"已保存到数据库: {name} ({owner}/{repo})")
            
        except sqlite3.Error as e:
            print(f"数据库保存失败 {name}: {str(e)}")
        finally:
            conn.close()
    
    @staticmethod
    def extract_packages_from_repo(
        owner: str, 
        repo: str, 
        token: str = None, 
        use_llm: bool = False, 
        model_name: str = "deepseek-chat",
        base_url: str = "https://api.deepseek.com",
        qpm: int = 1
    ) -> List[str]:
        """Collect package names from a repository's README and requirements files.

        Args:
            owner: Repository owner.
            repo: Repository name.
            token: GitHub token.
            use_llm: Whether to use the LLM for README analysis.
            model_name: LLM model identifier.
            base_url: LLM API base URL.
            qpm: LLM requests-per-minute cap.

        Returns:
            Deduplicated package names, with any package matching the repo
            name placed first.
        """
        collected = []

        # 1. README: regex or LLM extraction.
        readme = ReadmeAnalyzer.fetch_readme(owner, repo, token)
        if readme:
            if use_llm:
                collected.extend(
                    ReadmeAnalyzer.analyze_with_llm(readme, repo, model_name, base_url, qpm))
            else:
                collected.extend(ReadmeAnalyzer.parse_pip_commands(readme))

        # 2. Every requirements variant that exists.
        for req_text in ReadmeAnalyzer.fetch_requirements(owner, repo, token).values():
            collected.extend(ReadmeAnalyzer.parse_requirements(req_text))

        # 3. Deduplicate, keyed on the normalized base name (extras stripped,
        #    lowercase, '-' folded to '_'); first occurrence wins.
        deduped = {}
        for candidate in collected:
            key = candidate.split('[')[0].lower().replace('-', '_')
            deduped.setdefault(key, candidate)

        # 4. Order: the package whose normalized name equals the repo name
        #    goes first, everything else keeps its relative order.
        target = repo.lower().replace('-', '_')
        front = [pkg for key, pkg in deduped.items() if key == target]
        rest = [pkg for key, pkg in deduped.items() if key != target]
        return front + rest

    @staticmethod
    def _prioritize_packages(packages: list, repo: str) -> list:
        """Deduplicate *packages* (extras/case/'-' vs '_' insensitive) and move
        any package whose normalized name matches *repo* to the front."""
        unique_packages = []
        seen = set()
        for pkg in packages:
            base_pkg = pkg.split('[')[0].lower().replace('-', '_')
            if base_pkg not in seen:
                unique_packages.append(pkg)
                seen.add(base_pkg)

        repo_name_lower = repo.lower().replace('-', '_')
        matched_packages = [p for p in unique_packages
                            if p.split('[')[0].lower().replace('-', '_') == repo_name_lower]
        other_packages = [p for p in unique_packages
                          if p.split('[')[0].lower().replace('-', '_') != repo_name_lower]
        # Repo-matching packages first, everything else after.
        return matched_packages + other_packages

    @staticmethod
    def process_io(
        input_path: str, 
        output_path: str, 
        token: str = None, 
        use_llm: bool = False, 
        model_name: str = "deepseek-chat", 
        base_url: str = "https://api.deepseek.com",
        qpm: int = 1, 
        batch_size: int = 5,
        save_to_db: bool = False, 
        db_path: str = "repos.db", 
        cache_days: int = 30
    ):
        """Run the batch pipeline: read "name url" lines, extract packages,
        append results to *output_path*, optionally persist to SQLite.

        Supports LLM batch analysis (full batches of *batch_size*), a cache of
        previously processed repos, and per-repo fallback to requirements.txt.

        Args:
            input_path: Input file, one "name url" pair per line.
            output_path: Output file; truncated at start, then appended to.
            token: GitHub token for API calls.
            use_llm: Analyze READMEs with the LLM instead of regexes.
            model_name: LLM model identifier.
            base_url: LLM API base URL.
            qpm: LLM requests-per-minute cap.
            batch_size: Number of repos per LLM batch.
            save_to_db: Persist results to SQLite.
            db_path: SQLite database path.
            cache_days: Cache validity window in days (default 30).
        """
        # Initialize the database when persistence is requested.
        if save_to_db:
            FileProcessor.init_database(db_path)
        with open(input_path, 'r', encoding='utf-8') as infile:
            lines = [line.strip() for line in infile if line.strip()]

        # Truncate the output file up front; all later writes append.
        with open(output_path, 'w', encoding='utf-8'):
            pass

        i = 0
        while i < len(lines):
            if use_llm and i + batch_size <= len(lines):
                # --- Batch path: LLM analysis of a full batch ---
                batch_lines = lines[i:i+batch_size]
                batch_data = []
                valid_entries = []
                error_results = []  # collected error lines, flushed together

                # Validate and collect the batch entries.
                for line in batch_lines:
                    parts = re.split(r'\s+', line, maxsplit=1)
                    if len(parts) < 2:
                        error_results.append(f"{line} INVALID_FORMAT []\n")
                        continue

                    name, url = parts[0], parts[1]
                    owner, repo = ReadmeAnalyzer.extract_repo_info(url)

                    if not owner or not repo:
                        error_results.append(f"{name} {url} INVALID_URL []\n")
                        continue

                    # Reuse cached results while still within cache_days.
                    is_cached, cached_data = FileProcessor.check_cache_validity(owner, repo, db_path, cache_days)
                    if is_cached and cached_data:
                        packages_list = f"[{', '.join(cached_data['pip_packages'])}]"
                        with open(output_path, 'a', encoding='utf-8') as outfile:
                            outfile.write(f"{name} {url} {packages_list}\n")
                        print(f"使用缓存数据: {name} ({owner}/{repo}) - 处理时间: {cached_data['processed_at']}")
                        continue

                    try:
                        content = ReadmeAnalyzer.fetch_readme(owner, repo, token)
                        if not content:
                            error_results.append(f"{name} {url} README_NOT_FOUND []\n")

                            # Even without a README, try requirements.txt.
                            requirements_dict = ReadmeAnalyzer.fetch_requirements(owner, repo, token)

                            if requirements_dict:
                                all_packages = []
                                for req_content in requirements_dict.values():
                                    all_packages.extend(ReadmeAnalyzer.parse_requirements(req_content))

                                # Dedupe and put repo-matching packages first.
                                reordered_packages = FileProcessor._prioritize_packages(all_packages, repo)
                                packages_list = f"[{', '.join(reordered_packages)}]"

                                with open(output_path, 'a', encoding='utf-8') as outfile:
                                    outfile.write(f"{name} {url} {packages_list}\n")

                                if save_to_db:
                                    metadata = FileProcessor.fetch_repo_metadata(owner, repo, token)
                                    FileProcessor.save_to_database(name, url, owner, repo, 
                                                                 reordered_packages, metadata, db_path)

                            else:
                                if save_to_db:
                                    # Neither README nor requirements found:
                                    # persist an empty package list.
                                    metadata = FileProcessor.fetch_repo_metadata(owner, repo, token)
                                    FileProcessor.save_to_database(name, url, owner, repo, 
                                                                 [], metadata, db_path)
                            continue

                        batch_data.append({
                            'repo_name': repo,
                            'readme_content': content
                        })
                        valid_entries.append((name, url, repo))

                    except Exception as e:
                        error_results.append(f"{name} {url} ERROR: {str(e)} []\n")

                # Flush the collected error lines first.
                with open(output_path, 'a', encoding='utf-8') as outfile:
                    for error_result in error_results:
                        outfile.write(error_result)

                # Analyze all valid entries in one LLM call.
                if batch_data:
                    print(f"批量LLM分析 {len(batch_data)} 个仓库的README...")
                    llm_results = ReadmeAnalyzer.analyze_batch_with_llm(batch_data, model_name, base_url, qpm)

                    batch_results = []
                    for name, url, repo in valid_entries:
                        packages = llm_results.get(repo, [])

                        # Dedupe and put repo-matching packages first.
                        reordered_packages = FileProcessor._prioritize_packages(packages, repo)
                        packages_list = f"[{', '.join(reordered_packages)}]"

                        batch_results.append(f"{name} {url} {packages_list}\n")

                        if save_to_db:
                            owner, repo_name = ReadmeAnalyzer.extract_repo_info(url)
                            if owner and repo_name:
                                metadata = FileProcessor.fetch_repo_metadata(owner, repo_name, token)
                                FileProcessor.save_to_database(name, url, owner, repo_name, 
                                                             reordered_packages, metadata, db_path)

                    # Append the batch's results to the output file.
                    with open(output_path, 'a', encoding='utf-8') as outfile:
                        for result in batch_results:
                            outfile.write(result)

                    print(f"已完成批次处理，结果已追加到 {output_path}")

                i += batch_size

            else:
                # --- Single-item path (no LLM, or the tail of the input) ---
                line = lines[i]
                parts = re.split(r'\s+', line, maxsplit=1)
                if len(parts) < 2:
                    with open(output_path, 'a', encoding='utf-8') as outfile:
                        outfile.write(f"{line} INVALID_FORMAT []\n")
                    i += 1
                    continue

                name, url = parts[0], parts[1]
                owner, repo = ReadmeAnalyzer.extract_repo_info(url)

                if not owner or not repo:
                    with open(output_path, 'a', encoding='utf-8') as outfile:
                        outfile.write(f"{name} {url} INVALID_URL []\n")
                    i += 1
                    continue

                # Reuse cached results while still within cache_days.
                is_cached, cached_data = FileProcessor.check_cache_validity(owner, repo, db_path, cache_days)
                if is_cached and cached_data:
                    packages_list = f"[{', '.join(cached_data['pip_packages'])}]"

                    with open(output_path, 'a', encoding='utf-8') as outfile:
                        outfile.write(f"{name} {url} {packages_list}\n")

                    print(f"使用缓存数据: {name} ({owner}/{repo}) - 处理时间: {cached_data['processed_at']}")
                    i += 1
                    continue

                try:
                    # BUGFIX: model_name and base_url were previously omitted,
                    # so qpm was silently passed as model_name.
                    packages = FileProcessor.extract_packages_from_repo(
                        owner, repo, token, use_llm, model_name, base_url, qpm)

                    packages_list = f"[{', '.join(packages)}]"

                    with open(output_path, 'a', encoding='utf-8') as outfile:
                        outfile.write(f"{name} {url} {packages_list}\n")

                    if save_to_db:
                        metadata = FileProcessor.fetch_repo_metadata(owner, repo, token)
                        FileProcessor.save_to_database(name, url, owner, repo, 
                                                     packages, metadata, db_path)

                except Exception as e:
                    with open(output_path, 'a', encoding='utf-8') as outfile:
                        outfile.write(f"{name} {url} ERROR: {str(e)} []\n")

                i += 1

    @staticmethod
    def check_cache_validity(owner: str, repo: str, db_path: str = "repos.db", cache_days: int = 30) -> tuple:
        """检查数据库中是否存在有效的缓存数据
        
        Args:
            owner: 仓库所有者
            repo: 仓库名
            db_path: 数据库路径
            cache_days: 缓存有效期（天数）
            
        Returns:
            tuple: (is_cached, cached_data)
            - is_cached: bool, 是否存在有效缓存
            - cached_data: dict, 缓存的数据（如果存在）
        """
        # 数据库文件不存在，则无缓存
        if not os.path.exists(db_path):
            return False, None
        
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        
        try:
            # 查询指定仓库记录
            cursor.execute('''
                SELECT name, url, owner, repo_name, stars, last_updated, 
                       description, language, pip_packages, license_name, 
                       size_kb, created_at, processed_at
                FROM repositories 
                WHERE owner = ? AND repo_name = ?
            ''', (owner, repo))
            
            row = cursor.fetchone()
            if not row:
                return False, None
            
            # 检查处理时间是否超过缓存期限
            processed_at = row[12]  # processed_at字段
            if processed_at:
                try:
                    processed_time = datetime.fromisoformat(processed_at)
                    current_time = datetime.now()
                    time_diff = current_time - processed_time
                    
                    if time_diff.days <= cache_days:
                        # 缓存仍然有效，返回缓存数据
                        cached_data = {
                            'name': row[0],
                            'url': row[1],
                            'owner': row[2],
                            'repo_name': row[3],
                            'stars': row[4],
                            'last_updated': row[5],
                            'description': row[6],
                            'language': row[7],
                            'pip_packages': json.loads(row[8]) if row[8] else [],
                            'license_name': row[9],
                            'size_kb': row[10],
                            'created_at': row[11],
                            'processed_at': row[12]
                        }
                        return True, cached_data
                except ValueError:
                    # 如果时间格式解析失败，视为无效缓存
                    pass
            
            return False, None
            
        except sqlite3.Error as e:
            print(f"数据库查询缓存失败: {str(e)}")
            return False, None
        finally:
            conn.close()

# Example usage
if __name__ == "__main__":
    # Credentials and input/output locations.
    GITHUB_TOKEN = os.getenv("GITHUB_ACCESS_TOKEN")
    REPOS_FILE = "./2025实战任务_作品文件夹/OpenCloudOS 9 AI软件自动化验证工具/张耀翔_作品/pipeline_output/test_repos.txt"
    OUTPUT_FILE = "./2025实战任务_作品文件夹/OpenCloudOS 9 AI软件自动化验证工具/张耀翔_作品/pipeline_output/test_results.txt"

    # LLM analysis settings, all overridable through environment variables.
    USE_LLM = os.getenv("USE_LLM", "True").lower() == "true"   # enable LLM-based analysis
    LLM_QPM = int(os.getenv("LLM_QPM", "1"))                   # requests-per-minute cap
    BATCH_SIZE = int(os.getenv("BATCH_SIZE", "5"))             # repositories processed per batch
    MODEL_NAME = "deepseek/deepseek-chat-v3.1:free"
    BASE_URL = "https://openrouter.ai/api/v1"

    # Database / cache settings.
    SAVE_TO_DB = os.getenv("SAVE_TO_DB", "False").lower() == "true"  # persist results to SQLite
    DB_PATH = os.getenv("DB_PATH", "./2025实战任务_作品文件夹/OpenCloudOS 9 AI软件自动化验证工具/张耀翔_作品/repos.db")
    CACHE_DAYS = int(os.getenv("CACHE_DAYS", "30"))            # cache lifetime in days

    # Collect everything into one options mapping, then run the pipeline.
    run_options = {
        "input_path": REPOS_FILE,
        "output_path": OUTPUT_FILE,
        "token": GITHUB_TOKEN,
        "use_llm": USE_LLM,
        "model_name": MODEL_NAME,
        "base_url": BASE_URL,
        "qpm": LLM_QPM,
        "batch_size": BATCH_SIZE,
        "save_to_db": SAVE_TO_DB,
        "db_path": DB_PATH,
        "cache_days": CACHE_DAYS,
    }
    FileProcessor.process_io(**run_options)
    print(f"处理完成！结果已保存至 {OUTPUT_FILE}")