import os
import json
import requests
from typing import Dict, Any, Optional, List
import time
from datetime import datetime
from dotenv import load_dotenv
import random
from requests.adapters import HTTPAdapter
import urllib3
from urllib3.util.retry import Retry
import glob
import re
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
import logging  # 添加日志模块

# Configure the logging system: INFO-level output to both the console and a
# rotating-less plain log file next to the script.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler("semantic_scholar_crawler.log")
    ]
)
# Module-level fallback logger; the class also accepts an injected logger.
logger = logging.getLogger(__name__)

class SemanticScholarCrawler:
    def __init__(self, status_callback=None, logger=None):
        """Set up crawler state, HTTP headers, API configuration and session.

        Args:
            status_callback: Optional callable invoked with a status dict on
                every update_status() call.
            logger: Optional logger; falls back to this module's logger.
        """
        self.status_callback = status_callback
        self.logger = logger or logging.getLogger(__name__)

        # Progress/state fields consumed by update_status() and stop()
        self.is_running = False
        self.stop_requested = False
        self.processed_count = 0
        self.total_count = 0
        self.current_author = ""
        self.start_time = None

        # Browser-like headers; 'x-api-key' is appended below when configured
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'application/json',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0'
        }

        # Load .env before reading environment variables
        load_dotenv()

        # Semantic Scholar Graph API configuration
        self.base_url = "https://api.semanticscholar.org/graph/v1"
        self.api_key = os.getenv("SEMANTIC_SCHOLAR_API_KEY")
        if self.api_key:
            self.headers['x-api-key'] = self.api_key
            self.logger.info(" Semantic Scholar API Key已配置")
        else:
            self.logger.warning(" 未配置API Key，将使用免费访问模式")
            self.logger.warning("注意：免费模式有访问频率限制，建议在请求间添加延迟")

        self.session = self._create_session()
        self.logger.info(f"初始化完成，API模式: {'付费' if self.api_key else '免费'}")

    def update_status(self, **kwargs):
        """Update crawler state attributes and notify the status callback.

        Only attributes that already exist on the instance are overwritten;
        unknown keyword names are silently ignored.
        """
        for attr_name, attr_value in kwargs.items():
            if hasattr(self, attr_name):
                setattr(self, attr_name, attr_value)

        # No observer registered -> nothing more to do
        if not self.status_callback:
            return

        started = self.start_time.isoformat() if self.start_time else None
        self.status_callback({
            'crawler_type': 'semantic_scholar',
            'is_running': self.is_running,
            'processed_count': self.processed_count,
            'total_count': self.total_count,
            'current_author': self.current_author,
            'start_time': started,
            'status': 'running' if self.is_running else 'idle',
        })

    def stop(self):
        """停止爬虫"""
        self.stop_requested = True
        self.is_running = False
        self.logger.info(" Semantic Scholar爬虫停止请求已接收")

    def _create_session(self):
        """Build a requests.Session with retries tuned to the API mode.

        Paid (API-key) mode retries more often with short backoff; free mode
        retries fewer times but waits longer between attempts.
        """
        if self.api_key:
            retry_policy = Retry(
                total=5,
                backoff_factor=0.5,
                status_forcelist=[429, 500, 502, 503, 504]
            )
        else:
            retry_policy = Retry(
                total=3,
                backoff_factor=2.0,
                status_forcelist=[429, 500, 502, 503, 504]
            )

        session = requests.Session()
        adapter = HTTPAdapter(max_retries=retry_policy)
        for scheme in ('http://', 'https://'):
            session.mount(scheme, adapter)
        session.headers.update(self.headers)
        return session

    def search_author(self, author_name: str) -> List[Dict[str, Any]]:
        """Search Semantic Scholar for authors matching *author_name*.

        Returns up to 10 candidate author records, or an empty list when
        nothing matches or the request fails.
        """
        self.logger.info(f" 搜索Semantic Scholar作者: {author_name}")
        t0 = time.time()

        try:
            # Throttle when running without an API key
            if not self.api_key:
                delay = 0.5
                self.logger.info(f" 免费模式等待 {delay:.1f} 秒...")
                time.sleep(delay)

            params = {
                "query": author_name,
                "limit": 10,
                "fields": "authorId,name,url,affiliations,homepage,paperCount,citationCount,hIndex"
            }
            self.logger.info(f"发送请求: 搜索作者: {author_name}")
            response = self.session.get(
                f"{self.base_url}/author/search", params=params, timeout=30
            )
            response.raise_for_status()

            authors = response.json().get('data', [])
            if not authors:
                self.logger.warning(" 未找到匹配的作者")
                return []

            self.logger.info(f"找到 {len(authors)} 个潜在匹配作者")
            # Log each candidate so the operator can verify the match
            for idx, author in enumerate(authors, 1):
                self.logger.info(f"  {idx}. {author.get('name', '未知')} (ID: {author.get('authorId', '未知')})")

            self.logger.info(f" 作者搜索完成! 用时: {time.time() - t0:.2f}秒")
            return authors

        except requests.exceptions.HTTPError as e:
            self.logger.error(f" HTTP错误: {e.response.status_code} - {e.response.text}")
            self.logger.info(f"处理失败，用时: {time.time() - t0:.2f}秒")
            return []
        except Exception as e:
            self.logger.error(f" 搜索错误: {str(e)}")
            self.logger.info(f"处理失败，用时: {time.time() - t0:.2f}秒")
            return []

    def get_author_details(self, author_id: str) -> Optional[Dict[str, Any]]:
        """Fetch the detail record for one author id.

        Returns the author JSON payload, or None when the request fails.
        """
        self.logger.info(f" 获取作者详细信息 - ID: {author_id}")
        t0 = time.time()

        try:
            # Throttle when running without an API key
            if not self.api_key:
                delay = 0.5
                self.logger.info(f" 免费模式等待 {delay:.1f} 秒...")
                time.sleep(delay)

            # Only fields valid on the author endpoint are requested
            params = {
                "fields": "authorId,name,url,affiliations,homepage,paperCount,citationCount,hIndex"
            }
            self.logger.info(f"发送请求: 获取作者详情: {author_id}")
            response = self.session.get(
                f"{self.base_url}/author/{author_id}", params=params, timeout=30
            )
            response.raise_for_status()
            author_data = response.json()

            # Short summary of the record for the log
            self.logger.info(f"获取到作者: {author_data.get('name', '未知')}")
            self.logger.info(f"  论文数: {author_data.get('paperCount', 0)}")
            self.logger.info(f"  引用数: {author_data.get('citationCount', 0)}")
            self.logger.info(f"  h指数: {author_data.get('hIndex', 0)}")

            self.logger.info(f" 作者详情获取完成! 用时: {time.time() - t0:.2f}秒")
            return author_data

        except requests.exceptions.HTTPError as e:
            self.logger.error(f" HTTP错误: {e.response.status_code} - {e.response.text}")
            self.logger.info(f"处理失败，用时: {time.time() - t0:.2f}秒")
            return None
        except Exception as e:
            self.logger.error(f" 获取详情错误: {str(e)}")
            self.logger.info(f"处理失败，用时: {time.time() - t0:.2f}秒")
            return None

    def get_author_papers(self, author_id: str, limit: int = 50) -> List[Dict[str, Any]]:
        """Fetch up to *limit* papers for an author.

        Args:
            author_id: Semantic Scholar author id.
            limit: Maximum number of papers to request (default 50).

        Returns:
            A list of paper records; empty on error or when none are found.
        """
        # FIX: this method previously logged through the module-level `logger`,
        # bypassing the logger injected via __init__; now uses self.logger.
        self.logger.info(f" 获取作者论文 - ID: {author_id}")
        start_time = time.time()

        try:
            # Free mode: throttle to respect rate limits
            if not self.api_key:
                delay = 0.5  # fixed 0.5-second delay
                self.logger.info(f" 免费模式等待 {delay:.1f} 秒...")
                time.sleep(delay)

            papers_url = f"{self.base_url}/author/{author_id}/papers"
            params = {
                "limit": limit,
                "fields": "paperId,title,abstract,year,venue,citationCount,openAccessPdf,authors,publicationDate,publicationVenue,publicationTypes,fieldsOfStudy"
            }

            self.logger.info(f"发送请求: 获取作者论文: {author_id}")
            response = self.session.get(papers_url, params=params, timeout=30)
            response.raise_for_status()

            data = response.json()
            papers = data.get('data', [])

            if not papers:
                self.logger.warning(" 未找到相关论文")
                return []

            self.logger.info(f"找到 {len(papers)} 篇论文")

            # Only preview the first three papers in the log
            for i, paper in enumerate(papers[:3], 1):
                self.logger.info(f"  {i}. {paper.get('title', '无标题')} ({paper.get('year', '未知年份')})")

            if len(papers) > 3:
                self.logger.info(f"  ... 及其他 {len(papers)-3} 篇论文")

            process_time = time.time() - start_time
            self.logger.info(f" 论文获取完成! 用时: {process_time:.2f}秒")

            return papers

        except requests.exceptions.HTTPError as e:
            process_time = time.time() - start_time
            self.logger.error(f" HTTP错误: {e.response.status_code} - {e.response.text}")
            self.logger.info(f"处理失败，用时: {process_time:.2f}秒")
            return []
        except Exception as e:
            process_time = time.time() - start_time
            self.logger.error(f" 获取论文错误: {str(e)}")
            self.logger.info(f"处理失败，用时: {process_time:.2f}秒")
            return []

    def get_paper_details(self, paper_id: str) -> Optional[Dict[str, Any]]:
        """Fetch the detail record for one paper id.

        Returns the paper JSON payload, or None when the request fails.
        """
        # FIX: use the injected self.logger instead of the module-level logger.
        self.logger.info(f" 获取论文详情 - ID: {paper_id}")
        start_time = time.time()

        try:
            # FIX: free mode previously skipped the throttle applied by every
            # sibling request method, risking 429s; add the same fixed delay.
            if not self.api_key:
                delay = 0.5  # fixed 0.5-second delay
                self.logger.info(f" 免费模式等待 {delay:.1f} 秒...")
                time.sleep(delay)

            paper_url = f"{self.base_url}/paper/{paper_id}"
            params = {
                "fields": "paperId,title,abstract,year,venue,citationCount,openAccessPdf,authors,publicationDate,publicationVenue,publicationTypes,fieldsOfStudy,embedding,topics,externalIds"
            }

            self.logger.info(f"发送请求: 获取论文详情: {paper_id}")
            response = self.session.get(paper_url, params=params, timeout=30)
            response.raise_for_status()

            paper_data = response.json()

            # Short summary of the record for the log
            self.logger.info(f"获取到论文: {paper_data.get('title', '无标题')}")
            self.logger.info(f"  年份: {paper_data.get('year', '未知')}")
            self.logger.info(f"  引用数: {paper_data.get('citationCount', 0)}")

            process_time = time.time() - start_time
            self.logger.info(f" 论文详情获取完成! 用时: {process_time:.2f}秒")

            return paper_data

        except requests.exceptions.HTTPError as e:
            process_time = time.time() - start_time
            self.logger.error(f" HTTP错误: {e.response.status_code} - {e.response.text}")
            self.logger.info(f"处理失败，用时: {process_time:.2f}秒")
            return None
        except Exception as e:
            process_time = time.time() - start_time
            self.logger.error(f" 获取论文详情错误: {str(e)}")
            self.logger.info(f"处理失败，用时: {process_time:.2f}秒")
            return None

    def get_author_references(self, author_id: str, limit: int = 50) -> List[Dict[str, Any]]:
        """Fetch reference-style records for an author.

        NOTE(review): despite the name, this hits the author *papers* endpoint
        (not a citations endpoint) with a reduced field set, so the result is
        the author's own papers — confirm this is the intended semantics.

        Args:
            author_id: Semantic Scholar author id.
            limit: Maximum number of records to request (default 50).

        Returns:
            A list of paper records; empty on error or when none are found.
        """
        # FIX: use the injected self.logger instead of the module-level logger.
        self.logger.info(f"获取作者引用 - ID: {author_id}")
        start_time = time.time()

        try:
            # Free mode: throttle to respect rate limits
            if not self.api_key:
                delay = 0.5  # fixed 0.5-second delay
                self.logger.info(f" 免费模式等待 {delay:.1f} 秒...")
                time.sleep(delay)

            references_url = f"{self.base_url}/author/{author_id}/papers"
            params = {
                "limit": limit,
                "fields": "paperId,title,year,citationCount,authors"
            }

            self.logger.info(f"发送请求: 获取作者引用: {author_id}")
            response = self.session.get(references_url, params=params, timeout=30)
            response.raise_for_status()

            data = response.json()
            references = data.get('data', [])

            if not references:
                self.logger.warning(" 未找到引用信息")
                return []

            self.logger.info(f"找到 {len(references)} 条引用信息")

            process_time = time.time() - start_time
            self.logger.info(f" 引用信息获取完成! 用时: {process_time:.2f}秒")

            return references

        except requests.exceptions.HTTPError as e:
            process_time = time.time() - start_time
            self.logger.error(f" HTTP错误: {e.response.status_code} - {e.response.text}")
            self.logger.info(f"处理失败，用时: {process_time:.2f}秒")
            return []
        except Exception as e:
            process_time = time.time() - start_time
            self.logger.error(f" 获取引用错误: {str(e)}")
            self.logger.info(f"处理失败，用时: {process_time:.2f}秒")
            return []

    def _replace_null_values(self, data):
        """Recursively rebuild *data*, keeping None values as None.

        Dicts and lists are reconstructed (the result shares no containers
        with the input); scalars — including None — pass through unchanged.
        """
        if isinstance(data, list):
            return [self._replace_null_values(entry) for entry in data]
        if isinstance(data, dict):
            return {key: self._replace_null_values(value) for key, value in data.items()}
        # Scalars and None are returned as-is (None is deliberately preserved)
        return data

    def _handle_rate_limit(self, response):
        """Sleep and return True when *response* is a 429 rate-limit reply.

        Free mode waits a random 60-120 seconds; API-key mode waits 30-60
        seconds. Any other status code returns False immediately.
        """
        # FIX: use the injected self.logger instead of the module-level logger.
        if response.status_code != 429:
            return False

        if not self.api_key:
            # Free mode: back off for longer
            wait_time = random.uniform(60, 120)
            self.logger.warning(f" 遇到API限制，免费模式等待 {wait_time:.0f} 秒...")
        else:
            # API-key mode: shorter backoff is sufficient
            wait_time = random.uniform(30, 60)
            self.logger.warning(f" 遇到API限制，等待 {wait_time:.0f} 秒...")
        time.sleep(wait_time)
        return True

    def save_crawled_data(self, author_name: str, doc_id: str, author_data: Dict, output_dir: str = '.') -> Optional[str]:
        """将爬取的数据保存到统一的JSON文件中"""
        try:
            # 创建输出目录
            os.makedirs(output_dir, exist_ok=True)
            output_path = os.path.join(output_dir, 'Results_Semantic_Scholar.json')
            
            # 清理数据中的null值
            cleaned_data = self._replace_null_values(author_data)
            
            # 读取现有数据（如果文件存在）
            existing_data = []
            if os.path.exists(output_path):
                try:
                    with open(output_path, 'r', encoding='utf-8') as f:
                        existing_data = json.load(f)
                except json.JSONDecodeError:
                    logger.warning(" JSON文件解析错误，创建新文件")
                    existing_data = []
            
            # 检查是否已存在相同的mongodb_id
            for item in existing_data:
                if item.get('metadata', {}).get('mongodb_id') == doc_id:
                    logger.warning(f" 跳过: 已存在相同的mongodb_id ({doc_id})")
                    return output_path
            
            # 构建新的数据项
            new_item = {
                "metadata": {
                    "created_at": datetime.now().isoformat(),
                    "updated_at": datetime.now().isoformat(),
                    "version": "1.0",
                    "mongodb_id": doc_id
                },
                "author_data": cleaned_data
            }
            
            # 添加新的数据项
            existing_data.append(new_item)
            
            # 按照mongodb_id排序
            existing_data.sort(key=lambda x: x.get('metadata', {}).get('mongodb_id', ''))
            
            # 保存数据
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(existing_data, f, ensure_ascii=False, indent=2)
            logger.info(f" 数据已保存到: {output_path}")
            
            return output_path
                
        except Exception as e:
            logger.error(f" 保存数据时出错: {str(e)}")
            return None

    def process_author(self, author_name: str) -> Optional[Dict[str, Any]]:
        """Crawl the full profile for one author and persist it.

        Searches by name, takes the first candidate, fetches details, papers
        and references, then saves the combined record via save_crawled_data.

        Returns:
            The combined author record, or None when the author cannot be
            found or an error occurs.
        """
        # FIX: use the injected self.logger instead of the module-level logger.
        self.logger.info(f"\n{'='*60}")
        self.logger.info(f" 开始处理作者: {author_name}")
        self.logger.info(f"{'='*60}")
        start_time = time.time()

        try:
            authors = self.search_author(author_name)

            if not authors:
                self.logger.warning(" 未找到作者信息")
                return None

            # Take the first match; refine the matching logic here if needed
            selected_author = authors[0]
            author_id = selected_author.get('authorId')

            if not author_id:
                self.logger.warning(" 未找到作者ID")
                return None

            self.logger.info(f" 选择作者: {selected_author.get('name', '未知')} (ID: {author_id})")

            author_details = self.get_author_details(author_id)
            author_papers = self.get_author_papers(author_id)
            author_references = self.get_author_references(author_id)

            # Assemble the combined record
            author_data = {
                "name": author_name,
                "semantic_scholar_id": author_id,
                "sources": {
                    "semantic_scholar": {
                        "last_updated": datetime.now().isoformat(),
                        "author_details": author_details,
                        "papers_count": len(author_papers),
                        "papers": author_papers,
                        "references_count": len(author_references),
                        "references": author_references
                    }
                },
                "metadata": {
                    "created_at": datetime.now().isoformat(),
                    "updated_at": datetime.now().isoformat(),
                    "processed_at": datetime.now().isoformat()
                }
            }

            # Standalone runs have no MongoDB document, so doc_id is empty
            self.save_crawled_data(author_name, "", author_data)

            process_time = time.time() - start_time
            self.logger.info(f" 作者处理完成! 用时: {process_time:.2f}秒")

            return author_data

        except Exception as e:
            process_time = time.time() - start_time
            self.logger.error(f" 处理作者时出错: {str(e)}")
            self.logger.info(f"处理失败，用时: {process_time:.2f}秒")
            return None

    def process_json_file(self, json_file_path: str):
        """处理JSON文件中的作者信息"""
        logger.info(f" 处理文件: {json_file_path}")
        start_time = time.time()
        
        try:
            # 读取JSON文件
            with open(json_file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            
            # 提取作者姓名（根据实际JSON结构调整）
            author_name = None
            if isinstance(data, dict):
                # 尝试不同的可能的字段名
                possible_fields = ['name', '姓名', 'author_name', 'author', 'full_name']
                for field in possible_fields:
                    if field in data:
                        author_name = data[field]
                        break
                
                # 如果没有找到，尝试从其他字段提取
                if not author_name and 'raw_data' in data:
                    raw_data = data['raw_data']
                    for field in possible_fields:
                        if field in raw_data:
                            author_name = raw_data[field]
                            break
            
            if not author_name:
                logger.warning(" 无法从文件中提取作者姓名")
                return None
            
            logger.info(f" 提取到作者姓名: {author_name}")
            
            # 处理作者信息
            author_data = self.process_author(author_name)
            
            process_time = time.time() - start_time
            logger.info(f" 文件处理完成! 用时: {process_time:.2f}秒")
            
            return author_data
            
        except Exception as e:
            process_time = time.time() - start_time
            logger.error(f" 处理文件时出错: {str(e)}")
            logger.info(f"处理失败，用时: {process_time:.2f}秒")
            return None

    def process_all_json_files(self, directory='.'):
        """处理目录下的所有JSON文件"""
        # 获取目录下所有的JSON文件
        json_files = glob.glob(os.path.join(directory, '*.json'))
        total_files = len(json_files)
        
        logger.info(f" 开始处理目录: {directory}")
        logger.info(f"找到 {total_files} 个JSON文件待处理")
        logger.info(f"{'='*50}")
        
        for index, json_file in enumerate(json_files, 1):
            logger.info(f" 处理第 {index}/{total_files} 个文件")
            self.process_json_file(json_file)
            if index < total_files:
                # 统一使用0.5秒延迟
                delay = 0.5
                logger.info(f" 等待 {delay:.1f} 秒后处理下一个文件...")
                time.sleep(delay)
        
        logger.info(f" 所有文件处理完成!")
        logger.info(f"{'='*50}\n")

    def _get_mongodb_connection(self):
        """Create and verify a MongoDB client connection.

        Connection settings can be overridden via the MONGODB_USERNAME /
        MONGODB_PASSWORD / MONGODB_HOST / MONGODB_PORT environment variables;
        the historical hard-coded values remain as fallbacks for
        backward compatibility.

        Returns:
            A connected MongoClient, or None on failure.
        """
        # SECURITY NOTE(review): credentials were hard-coded in source; they
        # should live only in the environment and the exposed password rotated.
        # FIX: also use the injected self.logger instead of the module logger.
        username = os.getenv("MONGODB_USERNAME", "root")
        password = os.getenv("MONGODB_PASSWORD", "pj9gbcht")
        host = os.getenv("MONGODB_HOST", "dbconn.sealosbja.site")
        port = int(os.getenv("MONGODB_PORT", "31140"))

        uri = f"mongodb://{username}:{password}@{host}:{port}/?directConnection=true"

        try:
            client = MongoClient(uri, serverSelectionTimeoutMS=3000)
            # Force a server round-trip to validate the connection
            client.server_info()
            self.logger.info(" MongoDB连接成功")
            return client
        except ConnectionFailure:
            self.logger.error(" MongoDB连接失败")
            return None
        except Exception as e:
            self.logger.error(f" MongoDB连接错误: {str(e)}")
            return None

    def process_mongodb_documents(self, start_index: int = 1, stop_check=None):
        """Process author documents stored in MongoDB.

        Args:
            start_index (int): 1-based index of the first document to process.
            stop_check (callable): Optional; processing stops when it returns
                True (checked before each document, alongside stop_requested).
        """
        client = None  # defined up-front so the finally block is always safe
        try:
            self.logger.info("开始处理MongoDB数据...")
            client = self._get_mongodb_connection()
            if not client:
                self.logger.error(" 无法连接MongoDB，终止处理")
                return

            db = client['LZQ']
            collection = db['LIST']

            # Materialize the cursor so we can count and slice
            documents = list(collection.find({}))
            total_docs = len(documents)

            # Reset progress state for this run
            self.total_count = total_docs
            self.processed_count = 0
            self.start_time = datetime.now()
            self.is_running = True
            self.stop_requested = False

            self.logger.info(f"找到 {total_docs} 个文档待处理")
            self.logger.info(f"将从第 {start_index} 个文档开始处理")
            self.logger.info(f"{'='*50}")
            self.update_status()

            # Load previously saved results so completed authors are skipped
            output_path = os.path.join('.', 'Results_Semantic_Scholar.json')
            existing_data = []
            existing_ids = set()

            if os.path.exists(output_path):
                try:
                    with open(output_path, 'r', encoding='utf-8') as f:
                        existing_data = json.load(f)
                    existing_ids = {item.get('metadata', {}).get('mongodb_id') for item in existing_data}
                    self.logger.info(f" 加载现有结果文件，包含 {len(existing_ids)} 条记录")
                except json.JSONDecodeError:
                    self.logger.warning(" 现有结果文件格式错误，将创建新文件")

            success_count = 0
            for index, doc in enumerate(documents[start_index-1:], start_index):
                # Honor both the instance flag and the external stop hook
                if self.stop_requested or (stop_check and stop_check()):
                    self.logger.info(" 收到停止信号，停止处理")
                    self.logger.info(f" 已处理 {self.processed_count} 个文档")
                    break

                try:
                    raw_data = doc.get('raw_data', {})
                    author_name = raw_data.get('姓名', 'Unknown')
                    self.current_author = author_name
                    self.processed_count = index
                    self.update_status()

                    self.logger.info(f" 处理第 {index}/{total_docs} 个文档")

                    doc_id = str(doc.get('_id', ''))

                    if doc_id in existing_ids:
                        self.logger.warning(f" 跳过: 文档ID {doc_id} 已存在于结果文件中")
                        continue

                    if not author_name:
                        self.logger.warning(" 跳过: 未找到作者姓名")
                        continue

                    self.logger.info(f" 处理作者: {author_name}")
                    self.logger.info(f" 文档ID: {doc_id}")
                    self.logger.info(f"{'='*50}")

                    author_data = self.process_author(author_name)

                    if author_data:
                        # Attach the MongoDB id so the entry can be deduplicated
                        author_data.setdefault('metadata', {})['mongodb_id'] = doc_id
                        self.save_crawled_data(author_name, doc_id, author_data)
                        existing_ids.add(doc_id)
                        success_count += 1

                    if index < total_docs:
                        # Fixed 0.5-second pause between documents
                        delay = 0.5
                        self.logger.info(f" 等待 {delay:.1f} 秒后处理下一个文档...")
                        time.sleep(delay)

                except Exception as e:
                    # BUG FIX: this handler previously incremented an undefined
                    # local `processed_count`, raising NameError and aborting
                    # the whole run on the first per-document error.
                    self.logger.error(f" 处理文档时出错: {str(e)}")
                    continue

            # BUG FIX: the summary below previously read the undefined local
            # `processed_count`; use the instance counter instead.
            self.logger.info(f" 处理完成! 共处理 {self.processed_count} 个文档")

            if self.processed_count == total_docs:
                self.logger.info(" 所有文档已经全部处理完毕！")
            else:
                self.logger.warning(f" 还有 {total_docs - self.processed_count} 个文档未处理")

        except Exception as e:
            self.logger.error(f" MongoDB处理错误: {str(e)}")
        finally:
            # Mark the run finished so status consumers don't see a stale
            # "running" state (previously is_running stayed True forever).
            self.is_running = False
            self.update_status()
            if client:
                client.close()
                self.logger.info(" MongoDB连接已关闭")
def main():
    """Entry point: run the default Semantic Scholar crawling workflow."""
    logger.info("="*50)
    logger.info(" Semantic Scholar 数据爬取工具")
    logger.info("="*50)

    crawler = SemanticScholarCrawler()

    try:
        # Alternative workflows, kept for reference:
        #   crawler.process_author("Yann LeCun")   # one author by name
        #   crawler.process_all_json_files()       # every JSON file in cwd
        # Default workflow: walk the MongoDB collection from the start
        crawler.process_mongodb_documents(start_index=1)

    except KeyboardInterrupt:
        logger.warning(" 用户中断程序执行")
    except Exception as e:
        logger.error(f" 程序执行出错: {str(e)}")
    finally:
        logger.info(" 程序执行完成")


if __name__ == "__main__":
    main()