#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
import os
import json
import sqlite3
import argparse
from datetime import datetime
import logging
from pathlib import Path

# Logging setup
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger('mcp-parser')

# Regex patterns - kept deliberately flexible.
# NOTE(review): the parser methods below inline their own regexes; these
# module-level SERVER_PATTERN*/CATEGORY_PATTERN* names appear unreferenced
# within this file — confirm no external importers before removing.
SERVER_PATTERN = r'- \[(.*?)\]\((.*?)\)(\s*[🎖️]?)?(\s*[🐍📇🏎️🦀#️⃣☕]?)?(\s*[☁️🏠]?)?(\s*[🍎🪟🐧]?)?\s*-\s*(.*)'
# Fallback pattern: same shape but without the badge icons
SERVER_PATTERN_ALT = r'- \[(.*?)\]\((.*?)\)\s*-\s*(.*)'
# Bare-link pattern with no trailing description
SERVER_PATTERN_LINK = r'- \[(.*?)\]\((.*?)\)(\s*[🎖️]?)?(\s*[🐍📇🏎️🦀#️⃣☕]?)?(\s*[☁️🏠]?)?(\s*[🍎🪟🐧]?)?$'
# Chinese-format link pattern
SERVER_PATTERN_ZH = r'- \[(.*?)\]\((.*?)\)(\s*[🎖️]?)?(\s*[🐍📇🏎️🦀#️⃣☕]?)?(\s*[☁️🏠]?)?(\s*[🍎🪟🐧]?)?\s*-\s*(.*)'
# Loosest link pattern: tolerates anything between the link and description
SERVER_PATTERN_LOOSE = r'- \[(.*?)\]\((.*?)\)(.*)'
# Category heading pattern with an anchor tag
CATEGORY_PATTERN = r'### [📂🎨☁️🖥️💬👤🗄️💻🧮🎮🧠🗺️🎯📊🔎🔒🏃🚆🔄🛠️]* <a name="(.*?)"></a>(.*)'
# Fallback category pattern
CATEGORY_PATTERN_ALT = r'### (.+) <a name="(.*?)"></a>'
# Simple heading pattern
CATEGORY_PATTERN_SIMPLE = r'^#{1,3}\s+(.+)$'
# Bilingual (zh/en) category pattern with anchor - matches the category
# heading format used in README-zh.md
CATEGORY_PATTERN_ZH = r'### [📂🎨☁️🖥️💬👤🗄️💻🧮🎮🧠🗺️🎯📊🔎🔒🏃🚆🔄🛠️]* <a name="([^"]+)"></a>([^A-Za-z\d\s]*[^\x00-\x7F][^<]*)'

class MCPServerParser:
    def __init__(self, readme_path, output_json=None, db_path=None):
        """Initialize the parser.

        Args:
            readme_path: path of the README (markdown) file to parse.
            output_json: optional path for the JSON output file.
            db_path: optional path of a SQLite database to write into.
        """
        self.readme_path = readme_path
        self.output_json = output_json
        self.db_path = db_path
        self.servers = []             # parsed server entries (list of dicts)
        self.current_category = None  # category dict of the section being parsed
        self.skipped_lines = []       # (line_num, line, reason) tuples
        self.total_lines = 0          # total lines in the README
        self.matched_lines = 0        # lines successfully parsed as servers
        # Candidate encodings tried, in order, when reading the file
        self.encodings = ['utf-8', 'utf-8-sig', 'gbk', 'gb2312', 'gb18030', 'latin1']
    
    def parse(self):
        """Parse the README and return the collected server entries.

        Runs three passes: (1) collect category definitions from the table
        of contents, (2) collect category section headers, (3) walk the
        server-list section extracting "- [name](url) ... - description"
        lines. Falls back to a raw-bytes GitHub-link scan if nothing was
        found. Populates self.servers / self.skipped_lines /
        self.total_lines / self.matched_lines as side effects.

        Returns:
            list[dict]: the parsed servers ([] if the file is unreadable).
        """
        logger.info(f"开始解析 {self.readme_path}")
        
        # Try the candidate encodings in order until one decodes the file.
        content = None
        used_encoding = None
        
        for encoding in self.encodings:
            try:
                with open(self.readme_path, 'r', encoding=encoding) as f:
                    content = f.read()
                    used_encoding = encoding
                    logger.info(f"使用 {encoding} 编码成功读取文件")
                    # If the text contains square brackets we assume it has
                    # markdown links and accept this encoding.
                    if '[' in content and ']' in content:
                        break
            except UnicodeDecodeError:
                continue
        
        if not content:
            logger.error("无法读取文件，已尝试所有编码")
            return []
        
        try:
            # Parse the server-list portion of the file.
            lines = content.split('\n')
            self.total_lines = len(lines)
            
            # Pre-pass 1: category definitions from the table of contents
            # of README-zh.md, e.g.:
            # * 📂 - [浏览器自动化](#browser-automation)
            categories = {}
            for line_num, line in enumerate(lines, 1):
                category_def_match = re.search(r'\* ([📂🎨☁️🖥️💬👤🗄️💻🧮🎮🧠🗺️🎯📊🔎🔒🏃🚆🔄🛠️]*) - \[(.*?)\]\(#(.*?)\)', line)
                if category_def_match:
                    emoji = category_def_match.group(1).strip()
                    category_name = category_def_match.group(2).strip()
                    category_id = category_def_match.group(3)
                    categories[category_id] = {
                        'id': category_id,
                        'name': category_name,
                        'emoji': emoji
                    }
                    logger.debug(f"预处理 - 第 {line_num} 行: 定义分类 {category_id}: {category_name}")
            
            # Pre-pass 2: category section headers, e.g.:
            # ### 📂 <a name="browser-automation"></a>浏览器自动化
            for line_num, line in enumerate(lines, 1):
                category_header_match = re.search(r'### ([📂🎨☁️🖥️💬👤🗄️💻🧮🎮🧠🗺️🎯📊🔎🔒🏃🚆🔄🛠️]*) <a name="([^"]+)"></a>(.*)', line)
                if category_header_match:
                    emoji = category_header_match.group(1).strip()
                    category_id = category_header_match.group(2)
                    category_name = category_header_match.group(3).strip()
                    
                    # If the category was already defined in the TOC, prefer
                    # the (localized) name from the section header here.
                    if category_id in categories:
                        categories[category_id]['name'] = category_name
                    else:
                        categories[category_id] = {
                            'id': category_id,
                            'name': category_name,
                            'emoji': emoji
                        }
                    logger.debug(f"预处理 - 第 {line_num} 行: 分类标题 {category_id}: {category_name}")
            
            # Main pass: only lines after the server-list heading count.
            in_server_section = False
            current_category_id = None
            
            for line_num, line in enumerate(lines, 1):
                # Defensive strip; for str input the bare except is
                # unreachable and only guards exotic line objects.
                try:
                    line = line.strip()
                except:
                    line = str(line).strip()
                
                if not line:
                    continue
                
                # Detect the start of the server-list section.
                if "##  Server 列表" in line or "## Server List" in line or "##  Server 实现" in line:
                    in_server_section = True
                    logger.info(f"第 {line_num} 行: 进入 Server 列表部分")
                    continue
                
                if not in_server_section:
                    continue
                
                # Is this line a category section header?
                category_header_match = re.search(r'### [📂🎨☁️🖥️💬👤🗄️💻🧮🎮🧠🗺️🎯📊🔎🔒🏃🚆🔄🛠️]* <a name="([^"]+)"></a>', line)
                if category_header_match:
                    current_category_id = category_header_match.group(1)
                    if current_category_id in categories:
                        self.current_category = categories[current_category_id]
                        logger.debug(f"第 {line_num} 行: 切换到分类: {self.current_category}")
                    else:
                        # Not pre-defined: extract the name from this line.
                        name_match = re.search(r'</a>(.*?)$', line)
                        category_name = name_match.group(1).strip() if name_match else "未知分类"
                        self.current_category = {
                            'id': current_category_id,
                            'name': category_name
                        }
                        logger.debug(f"第 {line_num} 行: 切换到未预定义分类: {self.current_category}")
                    continue
                
                # No category found after a few lines: fall back to a
                # default bucket so server lines are not dropped.
                if not self.current_category and line_num > 10:
                    self.current_category = {
                        'id': 'uncategorized',
                        'name': '未分类'
                    }
                    logger.warning(f"未找到分类，创建默认分类: {self.current_category}")
                
                # Parse a server link line.
                if self.current_category and line.startswith('-') and '[' in line and ']' in line and '(' in line:
                    # Loose match: "- [name](url)<rest>"
                    server_match = re.search(r'- \[(.*?)\]\((.*?)\)(.*)', line)
                    if server_match:
                        repo_name = server_match.group(1)
                        repo_url = server_match.group(2)
                        
                        # Try to pull a description out of the remainder.
                        description = ""
                        rest = server_match.group(3).strip()
                        
                        # Skip optional badge icons, then take the text
                        # after the "-" separator as the description.
                        desc_match = re.search(r'(?:[🎖️🐍📇🏎️🦀#️⃣☕☁️🏠🍎🪟🐧]*)?\s*-\s*(.*)', rest)
                        if desc_match:
                            description = desc_match.group(1).strip()
                        
                        # Decode the badge icons found anywhere on the line.
                        is_official = '🎖️' in line
                        language = self._parse_language(line)
                        scope = self._parse_scope(line)
                        os = self._parse_os(line)  # NOTE: shadows the module-level `os` import inside this scope
                        
                        # Record the server entry.
                        self._add_server(line_num, repo_name, repo_url, is_official, language, scope, os, description)
                        self.matched_lines += 1
                        continue
                    else:
                        # Regex failed: fall back to naive bracket/paren slicing.
                        bracket_start = line.find('[')
                        bracket_end = line.find(']', bracket_start)
                        paren_start = line.find('(', bracket_end)
                        paren_end = line.find(')', paren_start)
                        
                        if bracket_start != -1 and bracket_end != -1 and paren_start != -1 and paren_end != -1:
                            repo_name = line[bracket_start+1:bracket_end]
                            repo_url = line[paren_start+1:paren_end]
                            
                            # Description: text after the first '-' that
                            # follows the closing parenthesis.
                            description = ""
                            rest = line[paren_end+1:].strip()
                            dash_pos = rest.find('-')
                            if dash_pos != -1:
                                description = rest[dash_pos+1:].strip()
                            
                            # Decode the badge icons found anywhere on the line.
                            is_official = '🎖️' in line
                            language = self._parse_language(line)
                            scope = self._parse_scope(line)
                            os = self._parse_os(line)
                            
                            # Record the server entry.
                            self._add_server(line_num, repo_name, repo_url, is_official, language, scope, os, description)
                            self.matched_lines += 1
                            continue
                    
                    # Neither strategy matched: record the line as skipped.
                    self.skipped_lines.append((line_num, line, "无法匹配链接格式"))
            
            # Nothing found via normal parsing: use the fallback method.
            if not self.servers:
                logger.warning("未通过常规方式找到 Server ，尝试备用方法")
                # Re-read the raw bytes and scan for GitHub links directly.
                try:
                    with open(self.readme_path, 'rb') as f:
                        binary_content = f.read()
                    
                    # GitHub link pattern (bytes-level)
                    github_links = re.findall(b'- \\[([^\\]]+)\\]\\(https://github\\.com/([^\\)]+)\\)', binary_content)
                    
                    if github_links:
                        logger.info(f"在二进制内容中找到 {len(github_links)} 个GitHub链接")
                        
                        # All fallback entries go into a default category.
                        self.current_category = {
                            'id': 'uncategorized',
                            'name': '未分类'
                        }
                        
                        # One entry per link (line number = position index).
                        for i, (repo_name_bytes, repo_url_bytes) in enumerate(github_links):
                            try:
                                repo_name = repo_name_bytes.decode('utf-8', errors='ignore')
                                repo_url = f"https://github.com/{repo_url_bytes.decode('utf-8', errors='ignore')}"
                                
                                self._add_server(i+1, repo_name, repo_url, False, [], [], [], f"Auto-extracted: {repo_name}")
                            except Exception as e:
                                logger.warning(f"处理二进制链接时出错: {str(e)}")
                    
                    logger.info(f"从二进制内容中成功提取 {len(self.servers)} 个 Server ")
                except Exception as e:
                    logger.warning(f"二进制链接提取失败: {str(e)}")
        
        except Exception as e:
            logger.error(f"解析错误: {str(e)}")
            import traceback
            logger.error(traceback.format_exc())
        
        logger.info(f"共解析 {len(self.servers)} 个MCP Server ")
        logger.info(f"匹配的行数: {self.matched_lines}")
        logger.info(f"跳过的行数: {len(self.skipped_lines)}")
        
        # Report the skipped lines (first 10 only).
        if self.skipped_lines:
            logger.info("跳过的行:")
            for line_num, line, reason in self.skipped_lines[:10]:  # show only the first 10
                logger.info(f"  第 {line_num} 行 ({reason}): {line}")
            if len(self.skipped_lines) > 10:
                logger.info(f"  ... 还有 {len(self.skipped_lines) - 10} 行被跳过")
                
        # Validate the parse results.
        self._validate_results()
                
        return self.servers
    
    def _add_server(self, line_num, repo_name, repo_url, is_official, language, scope, os, description):
        """Append a parsed server entry to ``self.servers``.

        Args:
            line_num: source line number (used for debug logging only).
            repo_name: markdown link text, usually "provider/name".
            repo_url: link target URL.
            is_official: whether the 🎖️ badge was present on the line.
            language: icon-derived language tags (possibly empty list).
            scope: icon-derived scope tags (possibly empty list).
            os: icon-derived OS tags (possibly empty list). NOTE: this
                parameter shadows the module-level ``os`` import within
                this method; the name is kept for interface stability.
            description: free-text description extracted after the link.
        """
        # Split "provider/name"; a plain name fills both fields.
        github_parts = repo_name.split('/')
        provider = github_parts[0] if len(github_parts) > 1 else repo_name
        name = github_parts[-1] if len(github_parts) > 1 else repo_name

        # Base ID: lowercase "provider-name" with npm-style '@' stripped.
        server_id = f"{provider}-{name}".lower().replace('@', '')

        # Make the ID unique by appending a counter if it is taken.
        # Build the set of existing IDs once instead of rebuilding a list
        # of all IDs on every while-iteration (the original was O(n^2)
        # per insertion for large README files).
        taken_ids = {s['id'] for s in self.servers}
        if server_id in taken_ids:
            counter = 1
            while f"{server_id}-{counter}" in taken_ids:
                counter += 1
            server_id = f"{server_id}-{counter}"

        server = {
            'id': server_id,
            'name': name,
            'provider': provider,
            'description': description,
            'url': repo_url,
            # Only treat the URL as a GitHub link when it points there.
            'github': repo_url if 'github.com' in repo_url else None,
            'is_official': is_official,
            'language': language,
            'scope': scope,
            'os': os,
            'category': self.current_category['id'],
            'category_name': self.current_category['name'],
            # Tags aggregate the category ID plus all icon-derived lists.
            'tags': [self.current_category['id']] + (language if language else []) + (scope if scope else []) + (os if os else [])
        }

        self.servers.append(server)
        logger.debug(f"第 {line_num} 行: 添加 Server : {server['name']}")
    
    def _validate_results(self):
        """Sanity-check the parsed servers (logs warnings, mutates nothing).

        Checks: number of distinct categories, duplicate server IDs, and
        entries missing a name/provider/url.
        """
        # Distinct categories seen across all servers.
        categories = {server['category'] for server in self.servers}

        logger.info(f"共解析 {len(categories)} 个分类")

        # Duplicate-ID check: count occurrences in a single pass instead
        # of calling ids.count(id) per unique ID (the original was O(n^2)).
        ids = [s['id'] for s in self.servers]
        counts = {}
        for sid in ids:
            counts[sid] = counts.get(sid, 0) + 1
        duplicates = [sid for sid, c in counts.items() if c > 1]
        if duplicates:
            logger.warning(f"有 {len(ids) - len(counts)} 个重复ID")
            logger.warning(f"重复的ID: {duplicates[:5]}")

        # Entries missing any of the required fields.
        missing_fields = [
            server['id'] for server in self.servers
            if not server['name'] or not server['provider'] or not server['url']
        ]

        if missing_fields:
            logger.warning(f"有 {len(missing_fields)} 个 Server 缺少必要字段")
            logger.warning(f"缺少字段的 Server ID: {missing_fields[:5]}")
    
    def _parse_language(self, text):
        """Map a programming-language icon found in *text* to a tag.

        Returns a single-element list for the leftmost icon present, or []
        when none match (or *text* is not a string). Uses substring search
        rather than a per-character loop because 🏎️ (U+1F3CE U+FE0F) and
        #️⃣ (three codepoints) are multi-codepoint emoji sequences that a
        single-character comparison can never match — the original loop
        silently never detected go or csharp.
        """
        language_map = {
            '🐍': 'python',
            '📇': 'typescript',
            '🏎️': 'go',
            '🦀': 'rust',
            '#️⃣': 'csharp',
            '☕': 'java'
        }

        if isinstance(text, str):
            # (position, tag) for every icon present; leftmost wins,
            # matching the original first-hit-in-text semantics.
            hits = [(text.find(icon), lang)
                    for icon, lang in language_map.items() if icon in text]
            if hits:
                return [min(hits)[1]]
        return []
    
    def _parse_scope(self, text):
        """Map a scope icon found in *text* to a tag.

        Returns a single-element list for the leftmost icon present, or []
        when none match (or *text* is not a string). Uses substring search
        because ☁️ (U+2601 U+FE0F) is a two-codepoint emoji sequence that
        a per-character comparison can never match — the original loop
        silently never detected 'cloud'.
        """
        scope_map = {
            '☁️': 'cloud',
            '🏠': 'local'
        }

        if isinstance(text, str):
            # (position, tag) for every icon present; leftmost wins.
            hits = [(text.find(icon), tag)
                    for icon, tag in scope_map.items() if icon in text]
            if hits:
                return [min(hits)[1]]
        return []
    
    def _parse_os(self, text):
        """Map an operating-system icon found in *text* to a tag.

        Returns a single-element list for the leftmost icon present, or []
        when none match (or *text* is not a string). Implemented with
        substring search for consistency with _parse_language/_parse_scope
        and to stay correct if a multi-codepoint icon is ever added (the
        current three icons are single codepoints, so behavior is
        unchanged).
        """
        os_map = {
            '🍎': 'macos',
            '🪟': 'windows',
            '🐧': 'linux'
        }

        if isinstance(text, str):
            # (position, tag) for every icon present; leftmost wins.
            hits = [(text.find(icon), tag)
                    for icon, tag in os_map.items() if icon in text]
            if hits:
                return [min(hits)[1]]
        return []
    
    def save_json(self):
        """Write the parsed servers to ``self.output_json`` as UTF-8 JSON.

        Returns:
            True on success; False when no output path is configured or
            the write failed (the error is logged, not raised).
        """
        if not self.output_json:
            return False

        try:
            output_dir = os.path.dirname(self.output_json)
            if output_dir:
                # exist_ok=True avoids the check-then-create race of the
                # original os.path.exists() + os.makedirs() sequence.
                os.makedirs(output_dir, exist_ok=True)

            with open(self.output_json, 'w', encoding='utf-8') as f:
                json.dump({
                    'count': len(self.servers),
                    'last_updated': datetime.now().isoformat(),
                    'servers': self.servers
                }, f, ensure_ascii=False, indent=2)

            logger.info(f"成功保存JSON到 {self.output_json}")
            return True

        except Exception as e:
            logger.error(f"保存JSON失败: {str(e)}")
            return False
    
    def save_to_db(self):
        """Persist the parsed servers into a SQLite database.

        Creates the tables if needed, upserts every server via REPLACE
        INTO, and rebuilds each server's tag rows (delete + insert).

        Returns:
            True on success; False when no db_path is configured or any
            error occurred (the error is logged and the batch rolled back).
        """
        if not self.db_path:
            return False
        
        try:
            conn = sqlite3.connect(self.db_path)
            cursor = conn.cursor()
            
            # Table names mirror the Django models:
            # api_mcpserverfromreadme rather than mcp_servers.
            cursor.execute('''
            CREATE TABLE IF NOT EXISTS api_mcpserverfromreadme (
                id TEXT PRIMARY KEY,
                name TEXT NOT NULL,
                provider TEXT NOT NULL,
                description TEXT,
                url TEXT,
                github TEXT,
                is_official INTEGER,
                category TEXT,
                category_name TEXT,
                tags TEXT,
                language TEXT,
                scope TEXT,
                os TEXT,
                readme_data TEXT
            )
            ''')
            
            # Tag table, also named after the corresponding Django model.
            cursor.execute('''
            CREATE TABLE IF NOT EXISTS api_mcpservertag (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                server_id TEXT,
                tag TEXT,
                FOREIGN KEY (server_id) REFERENCES api_mcpserverfromreadme(id) ON DELETE CASCADE,
                UNIQUE(server_id, tag)
            )
            ''')
            
            # Fetch the IDs that already exist (for new/updated counting).
            cursor.execute("SELECT id FROM api_mcpserverfromreadme")
            existing_ids = set([row[0] for row in cursor.fetchall()])
            
            # Start an explicit transaction for the whole batch of writes.
            conn.execute("BEGIN TRANSACTION")
            
            # Counters for newly inserted vs. updated servers.
            new_count = 0
            updated_count = 0
            
            for server in self.servers:
                server_id = server['id']
                
                # Serialize the list-valued fields as JSON strings.
                tags_json = json.dumps(server['tags'], ensure_ascii=False)
                language_json = json.dumps(server['language'], ensure_ascii=False)
                scope_json = json.dumps(server['scope'], ensure_ascii=False)
                os_json = json.dumps(server['os'], ensure_ascii=False)
                
                # The full server record is also stored as one JSON blob.
                server_json = json.dumps(server, ensure_ascii=False)
                
                # REPLACE INTO instead of INSERT so duplicate IDs are
                # overwritten rather than raising a constraint error.
                cursor.execute('''
                REPLACE INTO api_mcpserverfromreadme (
                    id, name, provider, description, url, github,
                    is_official, category, category_name, tags,
                    language, scope, os, readme_data
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                ''', (
                    server_id,
                    server['name'],
                    server['provider'],
                    server['description'],
                    server['url'],
                    server['github'],
                    1 if server['is_official'] else 0,
                    server['category'],
                    server['category_name'],
                    tags_json,
                    language_json,
                    scope_json,
                    os_json,
                    server_json
                ))
                
                if server_id in existing_ids:
                    updated_count += 1
                else:
                    new_count += 1
                
                # Drop the server's stale tag rows...
                cursor.execute("DELETE FROM api_mcpservertag WHERE server_id = ?", (server_id,))
                
                # ...then re-insert the current tag set.
                for tag in server['tags']:
                    cursor.execute('''
                    INSERT OR REPLACE INTO api_mcpservertag (server_id, tag)
                    VALUES (?, ?)
                    ''', (server_id, tag))
            
            # Commit the whole batch at once.
            conn.commit()
            conn.close()
            
            logger.info(f"成功保存到数据库 {self.db_path}")
            logger.info(f"新增: {new_count}, 更新: {updated_count}")
            return True
        
        except Exception as e:
            logger.error(f"保存到数据库失败: {str(e)}")
            # Best-effort rollback/cleanup if the connection was opened
            # before the failure.
            if 'conn' in locals():
                conn.rollback()
                conn.close()
            return False

def check_readme_file(readme_path):
    """Check that the README file exists and looks parseable.

    Logs diagnostics (size, line counts, first lines, server-list section
    location) to help debug parsing problems.

    Args:
        readme_path: path of the README file to inspect.

    Returns:
        True when the file exists, decodes, is non-empty markdown and
        contains links; False otherwise.
    """
    if not os.path.exists(readme_path):
        logger.error(f"README文件不存在: {readme_path}")
        return False

    logger.info(f"README文件大小: {os.path.getsize(readme_path)} 字节")

    # Try each candidate encoding until the file reads cleanly.
    encodings = ['utf-8', 'utf-8-sig', 'gbk', 'gb2312', 'gb18030', 'latin1']
    content = None

    for encoding in encodings:
        try:
            with open(readme_path, 'r', encoding=encoding) as f:
                content = f.read()
                logger.info(f"成功使用 {encoding} 编码读取文件")

                # Emit simple statistics about the decoded content.
                lines = content.split('\n')
                logger.info(f"文件行数: {len(lines)}")
                logger.info(f"文件中包含 '#' 的行数: {sum(1 for line in lines if '#' in line)}")
                logger.info(f"文件中包含 '-' 的行数: {sum(1 for line in lines if line.strip().startswith('-'))}")
                logger.info(f"文件中包含 '[' 的行数: {sum(1 for line in lines if '[' in line)}")
                logger.info(f"文件中包含 '](' 的行数: {sum(1 for line in lines if '](' in line)}")

                # Accept this encoding if the text plausibly has links.
                if '[' in content and '(' in content:
                    break
        except UnicodeDecodeError:
            logger.warning(f"使用 {encoding} 编码读取文件失败")
            continue

    if not content:
        logger.error(f"无法使用任何编码正确读取README文件")
        return False

    # Reject an empty file.
    if not content.strip():
        logger.error(f"README文件为空")
        return False

    # Require at least basic markdown headings.
    if not '# ' in content and not '## ' in content:
        logger.error(f"README文件不是Markdown格式")
        return False

    # Require links - a deliberately loose check.
    if not '[' in content or not '(' in content:
        logger.error(f"README文件不包含任何链接")
        # Dump a little context to aid debugging.
        logger.error("以下是文件的前100个字符：")
        logger.error(repr(content[:100]))
        return False

    # Show the first few lines to aid debugging.
    lines = content.split('\n')
    logger.info("文件前10行:")
    for i, line in enumerate(lines[:10]):
        logger.info(f"  {i+1}: {line}")

    # Locate the server-list section heading.
    server_section_start = None
    for i, line in enumerate(lines):
        if "##  Server 列表" in line or "## Server List" in line:
            server_section_start = i
            break

    # BUGFIX: compare against None explicitly - a section starting at
    # line index 0 is falsy and was previously reported as "not found".
    if server_section_start is not None:
        logger.info(f"找到 Server 列表部分，从第 {server_section_start+1} 行开始")
        # Show the first few lines of the server-list section.
        logger.info(" Server 列表部分的前5行:")
        for i in range(server_section_start, min(server_section_start + 5, len(lines))):
            logger.info(f"  {i+1}: {lines[i]}")
    else:
        logger.warning("未找到 Server 列表部分")

    return True

def main():
    """CLI entry point: validate the README, parse it, and write outputs.

    Returns:
        0 on success, 1 on any failure (used as the process exit code).
    """
    parser = argparse.ArgumentParser(description='解析MCP Server README文件')
    parser.add_argument('--readme', required=True, help='README-zh.md文件路径')
    parser.add_argument('--output', help='输出JSON文件路径')
    parser.add_argument('--db', help='SQLite数据库文件路径')
    parser.add_argument('--verbose', '-v', action='store_true', help='显示详细日志')
    parser.add_argument('--dump-skipped', action='store_true', help='将跳过的行保存到文件')
    parser.add_argument('--check-only', action='store_true', help='只检查README文件有效性，不执行解析')
    
    args = parser.parse_args()
    
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    
    # Bail out early if the README file is missing or unusable.
    if not check_readme_file(args.readme):
        return 1
        
    # In check-only mode we are done here.
    if args.check_only:
        return 0
    
    # Default output path when neither --output nor --db was given.
    if not args.output and not args.db:
        args.output = os.path.join(os.path.dirname(args.readme), 'mcp_servers.json')
        logger.info(f"未指定输出路径，使用默认路径: {args.output}")
    
    # Build the parser and run it.
    # NOTE(review): this rebinds `parser` from the argparse.ArgumentParser
    # above to an MCPServerParser - harmless here, but easy to misread.
    parser = MCPServerParser(
        readme_path=args.readme,
        output_json=args.output,
        db_path=args.db
    )
    
    try:
        # Parse the README.
        servers = parser.parse()
        
        if not servers:
            logger.error("未能解析到任何 Server ，请检查README文件格式或脚本参数")
            return 1
            
        logger.info(f"共解析 {len(servers)} 个MCP Server ")
        
        # Collect parsing statistics for the JSON output.
        stats = {
            'total_servers': len(servers),
            'categories': len(set(s['category'] for s in servers)),
            'skipped_lines': len(parser.skipped_lines),
            'matched_lines': parser.matched_lines,
            'total_lines': parser.total_lines,
            'languages': {},
            'scopes': {},
            'os': {}
        }
        
        # Tally language, scope, and OS tag frequencies.
        for server in servers:
            for lang in server['language']:
                stats['languages'][lang] = stats['languages'].get(lang, 0) + 1
            for scope_item in server['scope']:
                stats['scopes'][scope_item] = stats['scopes'].get(scope_item, 0) + 1
            for os_item in server['os']:
                stats['os'][os_item] = stats['os'].get(os_item, 0) + 1
        
        # Log the statistics summary.
        logger.info(f"分类数量: {stats['categories']}")
        logger.info(f"编程语言统计: {stats['languages']}")
        logger.info(f"范围统计: {stats['scopes']}")
        logger.info(f"操作系统统计: {stats['os']}")
        
        # Optionally dump the skipped lines to a text file.
        if args.dump_skipped and parser.skipped_lines:
            skipped_file = os.path.join(os.path.dirname(args.output or args.db), 'skipped_lines.txt')
            with open(skipped_file, 'w', encoding='utf-8') as f:
                f.write(f"共跳过 {len(parser.skipped_lines)} 行\n\n")
                for line_num, line, reason in parser.skipped_lines:
                    f.write(f"第 {line_num} 行 ({reason}):\n{line}\n\n")
            logger.info(f"跳过的行已保存到: {skipped_file}")
        
        # Write the JSON output (inline rather than via parser.save_json()
        # so the stats block can be included).
        if args.output:
            # Attach the statistics to the JSON document.
            json_data = {
                'count': len(servers),
                'last_updated': datetime.now().isoformat(),
                'stats': stats,
                'servers': servers
            }
            
            output_dir = os.path.dirname(args.output)
            if output_dir and not os.path.exists(output_dir):
                os.makedirs(output_dir)
                
            with open(args.output, 'w', encoding='utf-8') as f:
                json.dump(json_data, f, ensure_ascii=False, indent=2)
            
            logger.info(f"JSON文件已保存: {args.output}")
        
        # Write to the SQLite database when requested.
        if args.db:
            if parser.save_to_db():
                logger.info(f"数据已保存到数据库: {args.db}")
            else:
                logger.error(f"保存到数据库失败")
        
        return 0
    
    except Exception as e:
        logger.error(f"执行失败: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return 1


if __name__ == '__main__':
    # Propagate main()'s return value as the process exit code.
    # `raise SystemExit(...)` is preferred over the site-provided `exit()`
    # helper, which is not guaranteed to exist (e.g. under `python -S`
    # or in embedded interpreters).
    raise SystemExit(main())