import os
import json
import requests
from bs4 import BeautifulSoup
from typing import Dict, Any, Optional, List
import time
from datetime import datetime
import websocket
import hmac
import hashlib
import base64
from urllib.parse import urlencode, urlparse, quote
from wsgiref.handlers import format_date_time
from time import mktime
from dotenv import load_dotenv
from queue import Queue
import threading
import glob
import random
from requests.adapters import HTTPAdapter
import urllib3
from urllib3.util.retry import Retry
import pandas as pd
import re
from pymongo import MongoClient
import logging  # 新增日志模块

# Configure application-wide logging: INFO level, timestamped records,
# emitted both to the console and to scholar_crawler.log.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler("scholar_crawler.log")
    ]
)
logger = logging.getLogger(__name__)  # module-level fallback logger

class ScholarCrawler:
    def __init__(self, status_callback=None, logger=None):
        """Initialize the crawler.

        Args:
            status_callback: optional callable that receives a status
                snapshot dict every time update_status() runs.
            logger: optional logging.Logger; falls back to the module logger.
        """
        self.status_callback = status_callback
        self.logger = logger or logging.getLogger(__name__)

        # Runtime state reported through update_status()/stop()
        self.is_running = False
        self.processed_count = 0
        self.total_count = 0
        self.current_author = ""
        self.start_time = None
        self.stop_requested = False

        # Browser-like headers to reduce the chance of being blocked
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cache-Control': 'max-age=0'
        }
        self.session = self._create_session()

        # Load configuration from a .env file, if present
        load_dotenv()

        # ORCID API configuration.
        # SECURITY NOTE(review): the hard-coded fallback client id/secret below
        # should be removed (and the credentials rotated) — supply them via
        # environment variables only.
        self.orcid_client_id = os.getenv("ORCID_CLIENT_ID", "APP-XSWH80IK4OJZJAQF")
        self.orcid_client_secret = os.getenv("ORCID_CLIENT_SECRET", "e20ab053-0efe-451e-88cb-e38144810221")
        self.orcid_redirect_uri = os.getenv("ORCID_REDIRECT_URI", "http://devbox2.ns-pjl1a1xu.svc.cluster.local:8080")

        # Obtain an ORCID public-API access token up front
        self.orcid_access_token = self._get_orcid_token()
        if self.orcid_access_token:
            self.logger.info("ORCID Token已配置")
        else:
            self.logger.warning("警告：未能获取ORCID Token，API访问可能受限")

    def update_status(self, **kwargs):
        """更新爬虫状态"""
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
        
        if self.status_callback:
            status_data = {
                'crawler_type': 'scholar',
                'is_running': self.is_running,
                'processed_count': self.processed_count,
                'total_count': self.total_count,
                'current_author': self.current_author,
                'start_time': self.start_time.isoformat() if self.start_time else None,
                'status': 'running' if self.is_running else 'idle'
            }
            self.status_callback(status_data)

    def stop(self):
        """停止爬虫"""
        self.stop_requested = True
        self.is_running = False
        self.logger.info("🛑 Scholar爬虫停止请求已接收")

    def _create_session(self):
        """Build a requests.Session with retry-enabled HTTP(S) adapters.

        Retries up to 5 times with exponential backoff on transient server
        errors (500/502/503/504) and installs the default browser-like
        headers from self.headers.
        """
        retry_policy = Retry(
            total=5,
            backoff_factor=0.5,
            status_forcelist=[500, 502, 503, 504],
        )
        http_adapter = HTTPAdapter(max_retries=retry_policy)
        new_session = requests.Session()
        for scheme in ('http://', 'https://'):
            new_session.mount(scheme, http_adapter)
        new_session.headers.update(self.headers)
        return new_session

    def _get_orcid_token(self):
        """Fetch an ORCID public-API access token via client_credentials.

        Returns:
            The access token string on success, or None when the request
            fails (the error is logged, never raised).
        """
        token_endpoint = 'https://orcid.org/oauth/token'
        request_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/x-www-form-urlencoded',
        }
        form_fields = {
            'client_id': self.orcid_client_id,
            'client_secret': self.orcid_client_secret,
            'grant_type': 'client_credentials',
            'scope': '/read-public',
        }

        try:
            resp = self.session.post(
                token_endpoint,
                headers=request_headers,
                data=form_fields,
                timeout=30,
                verify=True,
            )
            resp.raise_for_status()
            payload = resp.json()
            return payload.get('access_token')
        except Exception as e:
            self.logger.error(f"获取ORCID Token失败: {str(e)}")
            return None


    def get_arxiv_papers(self, author_name: str) -> List[Dict[str, Any]]:
        """Fetch up to 10 of the most recent arXiv papers for an author.

        Queries the arXiv Atom API and normalizes each entry into a dict
        with title, (500-char-truncated) summary, authors, dates, paper id,
        categories and links.

        Args:
            author_name: name to search with the ``au:`` query field.

        Returns:
            List of paper dicts; empty list when nothing is found or on
            any network/parsing failure (errors are logged, not raised).
        """
        # Fix: use the injectable self.logger (as __init__/stop do) instead
        # of the module-level logger.
        self.logger.info(f"正在处理arXiv论文数据 - 作者：{author_name}")
        start_time = time.time()

        papers_data: List[Dict[str, Any]] = []
        base_url = "http://export.arxiv.org/api/query"

        try:
            self.logger.info(f"搜索作者论文: {author_name}")
            params = {
                "search_query": f"au:{author_name}",
                "start": 0,
                "max_results": 10,  # only the 10 newest papers
                "sortBy": "submittedDate",
                "sortOrder": "descending",
            }

            response = self.session.get(base_url, params=params, timeout=30)
            response.raise_for_status()

            # The arXiv API returns an Atom XML feed
            soup = BeautifulSoup(response.text, 'xml')
            entries = soup.find_all('entry')

            if not entries:
                self.logger.warning("未找到arXiv论文")
                return []

            self.logger.info(f"找到 {len(entries)} 篇arXiv论文")

            for entry in entries:
                try:
                    def _text(tag_name):
                        # Single lookup per optional child tag (the original
                        # called entry.find() twice per field).
                        node = entry.find(tag_name)
                        return node.text.strip() if node else ''

                    title = _text('title')
                    summary = _text('summary')
                    published = _text('published')
                    updated = _text('updated')
                    paper_id = _text('id')

                    # Author names live in <author><name> children
                    authors = [
                        name_node.text.strip()
                        for name_node in (a.find('name') for a in entry.find_all('author'))
                        if name_node
                    ]

                    # Keep only non-empty category terms
                    categories = [
                        c.get('term', '')
                        for c in entry.find_all('category')
                        if c.get('term', '')
                    ]

                    # Keep only links that actually have an href
                    links = [
                        {
                            'href': l.get('href', ''),
                            'title': l.get('title', ''),
                            'rel': l.get('rel', ''),
                        }
                        for l in entry.find_all('link')
                        if l.get('href', '')
                    ]

                    papers_data.append({
                        'title': title,
                        'summary': summary[:500] + '...' if len(summary) > 500 else summary,
                        'authors': authors,
                        'published_date': published,
                        'updated_date': updated,
                        'paper_id': paper_id,
                        'categories': categories,
                        'links': links,
                    })

                except Exception as e:
                    self.logger.warning(f"处理论文时出错: {str(e)}")
                    continue

            process_time = time.time() - start_time
            if papers_data:
                self.logger.info(f"arXiv论文数据处理完成！找到 {len(papers_data)} 篇论文。用时：{process_time:.2f}秒")
            else:
                self.logger.info(f"未找到有效的arXiv论文数据。用时：{process_time:.2f}秒")

            return papers_data

        except Exception as e:
            process_time = time.time() - start_time
            self.logger.error(f"arXiv访问错误: {str(e)}")
            self.logger.info(f"处理失败，用时：{process_time:.2f}秒")
            return []

    def get_dblp_publications(self, author_name: str) -> List[Dict[str, Any]]:
        """Fetch up to 10 DBLP publications matching the author name.

        Queries the DBLP publication search API (JSON) and normalizes each
        hit into a dict (title, authors, venue, year, type, key, doi, url,
        pages, electronic editions).

        Args:
            author_name: free-text query passed to DBLP's ``q`` parameter.

        Returns:
            List of publication dicts; empty list when nothing is found or
            on any failure (errors are logged, not raised).
        """
        # Fix: use the injectable self.logger (as __init__/stop do) instead
        # of the module-level logger. Also dropped the unused
        # author_base_url local.
        self.logger.info(f"正在处理DBLP出版物数据 - 作者：{author_name}")
        start_time = time.time()

        publications_data: List[Dict[str, Any]] = []
        base_url = "https://dblp.org/search/publ/api"

        try:
            self.logger.info(f"搜索作者出版物: {author_name}")
            params = {
                "q": author_name,
                "format": "json",
                "h": 10  # cap at 10 results
            }

            response = self.session.get(base_url, params=params, timeout=30)
            response.raise_for_status()

            data = response.json()
            hits = data.get('result', {}).get('hits', {}).get('hit', [])

            if not hits:
                self.logger.warning("未找到DBLP出版物")
                return []

            self.logger.info(f"找到 {len(hits)} 个DBLP出版物")

            for hit in hits:
                try:
                    info = hit.get('info', {})

                    # DBLP encodes authors as a list, a single dict, or a
                    # plain string — normalize all three shapes.
                    authors_data = info.get('authors', {}).get('author', [])
                    authors = []
                    if isinstance(authors_data, list):
                        for author in authors_data:
                            if isinstance(author, dict):
                                authors.append(author.get('text', ''))
                            else:
                                authors.append(str(author))
                    elif isinstance(authors_data, dict):
                        authors.append(authors_data.get('text', ''))
                    elif authors_data:
                        authors.append(str(authors_data))

                    # Electronic-edition links may be a string, dict, or list
                    ee = info.get('ee', [])
                    if isinstance(ee, str):
                        ee = [ee]
                    elif isinstance(ee, dict):
                        ee = [ee.get('text', '')]

                    publications_data.append({
                        'title': info.get('title', ''),
                        'authors': authors,
                        'venue': info.get('venue', ''),
                        'year': info.get('year', ''),
                        'type': info.get('type', ''),
                        'key': info.get('key', ''),
                        'doi': info.get('doi', ''),
                        'url': info.get('url', ''),
                        'pages': info.get('pages', ''),
                        'electronic_editions': ee,
                    })

                except Exception as e:
                    self.logger.warning(f"处理出版物时出错: {str(e)}")
                    continue

            process_time = time.time() - start_time
            if publications_data:
                self.logger.info(f"DBLP数据处理完成！找到 {len(publications_data)} 个出版物。用时：{process_time:.2f}秒")
            else:
                self.logger.info(f"未找到有效的DBLP数据。用时：{process_time:.2f}秒")

            return publications_data

        except Exception as e:
            process_time = time.time() - start_time
            self.logger.error(f"DBLP访问错误: {str(e)}")
            self.logger.info(f"处理失败，用时：{process_time:.2f}秒")
            return []

    def get_orcid_profile(self, author_name: str) -> List[Dict[str, Any]]:
        """Search ORCID for an author and return matching profile records.

        Searches the ORCID public search API, fetches each candidate's full
        record, and collects up to 10 case-insensitive exact name matches.
        If no exact match exists, the first two candidates are used as a
        fallback. Each returned dict carries names, a biography (truncated
        to 500 chars), keywords, external identifiers, employments,
        educations, and an ``exact_match`` flag. Returns an empty list on
        any failure (errors are logged, not raised).

        NOTE(review): this method logs via the module-level ``logger``,
        unlike ``__init__``/``stop`` which use the injectable
        ``self.logger`` — consider unifying.
        """
        logger.info(f"正在处理ORCID个人数据 - 作者：{author_name}")
        start_time = time.time()
        
        profile_data = []
        base_url = "https://pub.orcid.org/v3.0/search"
        
        try:
            # Query the ORCID public search endpoint
            logger.info(f"搜索ORCID个人: {author_name}")
            headers = {
                'Accept': 'application/json',
                'Content-Type': 'application/json'
            }
            
            # Build the search query
            search_query = {
                "q": author_name,
                "start": 0,
                "rows": 10  # fetch the first 10 candidates
            }
            
            response = self.session.get(
                base_url, 
                params=search_query, 
                headers=headers, 
                timeout=30
            )
            response.raise_for_status()
            
            data = response.json()
            results = data.get('result', [])
            
            if not results:
                logger.warning("未找到ORCID个人")
                return []
            
            logger.info(f"找到 {len(results)} 个ORCID个人，开始筛选匹配的结果")
            
            # Candidates whose full name matches exactly
            matched_results = []
            exact_match_count = 0
            
            # All fetched (orcid_id, profile_info) tuples — used as a
            # fallback when no exact match is found
            all_results = []
            
            for result in results:
                try:
                    orcid_identifier = result.get('orcid-identifier', {})
                    orcid_id = orcid_identifier.get('path', '')
                    
                    if not orcid_id:
                        continue
                    
                    # Fetch the candidate's full public record
                    profile_url = f"https://pub.orcid.org/v3.0/{orcid_id}"
                    profile_response = self.session.get(profile_url, headers=headers, timeout=30)
                    profile_response.raise_for_status()
                    
                    profile_info = profile_response.json()
                    person = profile_info.get('person', {})
                    
                    # Extract the candidate's name parts
                    name = person.get('name', {})
                    given_names = name.get('given-names', {}).get('value', '') if name.get('given-names') else ''
                    family_name = name.get('family-name', {}).get('value', '') if name.get('family-name') else ''
                    full_name = f"{given_names} {family_name}".strip()
                    
                    # Remember every candidate for the fallback path
                    all_results.append((orcid_id, profile_info))
                    
                    # Case-insensitive exact name comparison
                    if full_name.lower() == author_name.lower():
                        matched_results.append((orcid_id, profile_info))
                        exact_match_count += 1
                        logger.info(f"找到完全匹配: {full_name}")
                        
                        # Stop once 10 exact matches have been collected
                        if exact_match_count >= 10:
                            break
                    
                    # Random delay to avoid hammering the API
                    time.sleep(random.uniform(1, 2))
                    
                except Exception as e:
                    logger.warning(f"处理ORCID个人时出错: {str(e)}")
                    continue
            
            # No exact match: fall back to the first two candidates
            if not matched_results and all_results:
                logger.warning("未找到完全匹配的结果，使用前两个最接近的结果")
                matched_results = all_results[:2]
                logger.info(f"选取了 {len(matched_results)} 个最接近的结果")
            else:
                logger.info(f"找到 {exact_match_count} 个完全匹配的结果")
            
            # Build the output record for each selected candidate
            for orcid_id, profile_info in matched_results:
                try:
                    person = profile_info.get('person', {})
                    name = person.get('name', {})
                    given_names = name.get('given-names', {}).get('value', '') if name.get('given-names') else ''
                    family_name = name.get('family-name', {}).get('value', '') if name.get('family-name') else ''
                    full_name = f"{given_names} {family_name}".strip()
                    
                    # Alternate names
                    other_names = []
                    other_names_data = name.get('other-names', {})
                    if other_names_data and 'other-name' in other_names_data:
                        for other_name in other_names_data['other-name']:
                            if isinstance(other_name, dict) and 'content' in other_name:
                                other_names.append(other_name['content'])
                    
                    # Biography text
                    biography = ''
                    bio_data = person.get('biography', {})
                    if bio_data and isinstance(bio_data, dict):
                        biography = bio_data.get('content', '')
                    
                    # Keywords
                    keywords = []
                    keywords_data = person.get('keywords', {})
                    if keywords_data and 'keyword' in keywords_data:
                        for keyword in keywords_data['keyword']:
                            if isinstance(keyword, dict) and 'content' in keyword:
                                keywords.append(keyword['content'])
                    
                    # External identifiers (e.g. Scopus/ResearcherID links)
                    external_identifiers = []
                    external_ids = person.get('external-identifiers', {})
                    if external_ids and 'external-identifier' in external_ids:
                        for ext_id in external_ids['external-identifier']:
                            if isinstance(ext_id, dict):
                                external_identifiers.append({
                                    'type': ext_id.get('external-id-type', ''),
                                    'value': ext_id.get('external-id-value', ''),
                                    'url': ext_id.get('external-id-url', {}).get('value', '') if ext_id.get('external-id-url') else ''
                                })
                    
                    # Employment history
                    employments = []
                    activities = profile_info.get('activities-summary', {})
                    employments_data = activities.get('employments', {}).get('employment-summary', []) if activities else []
                    
                    for employment in employments_data:
                        if isinstance(employment, dict):
                            org = employment.get('organization', {})
                            employments.append({
                                'organization': org.get('name', ''),
                                'department': org.get('department-name', ''),
                                'role': employment.get('role-title', ''),
                                'start_date': employment.get('start-date', {}),
                                'end_date': employment.get('end-date', {})
                            })
                    
                    # Education history
                    educations = []
                    educations_data = activities.get('educations', {}).get('education-summary', []) if activities else []
                    
                    for education in educations_data:
                        if isinstance(education, dict):
                            org = education.get('organization', {})
                            educations.append({
                                'organization': org.get('name', ''),
                                'department': org.get('department-name', ''),
                                'role': education.get('role-title', ''),
                                'start_date': education.get('start-date', {}),
                                'end_date': education.get('end-date', {})
                            })
                    
                    # NOTE(review): this rebinding shadows the loop variable
                    # 'profile_info' (the raw API record). Harmless because
                    # the record was already fully read above, but a distinct
                    # name would be clearer.
                    profile_info = {
                        'orcid_id': orcid_id,
                        'full_name': full_name,
                        'given_names': given_names,
                        'family_name': family_name,
                        'other_names': other_names,
                        'biography': biography[:500] + '...' if len(biography) > 500 else biography,
                        'keywords': keywords,
                        'external_identifiers': external_identifiers,
                        'employments': employments,
                        'educations': educations,
                        'exact_match': full_name.lower() == author_name.lower()  # marks whether this was an exact name match
                    }
                    
                    profile_data.append(profile_info)
                    
                except Exception as e:
                    logger.warning(f"处理ORCID个人详细信息时出错: {str(e)}")
                    continue
            
            process_time = time.time() - start_time
            if profile_data:
                logger.info(f"ORCID个人数据处理完成！处理了 {len(profile_data)} 个结果。用时：{process_time:.2f}秒")
            else:
                logger.info(f"未找到任何ORCID个人数据。用时：{process_time:.2f}秒")
            
            return profile_data
            
        except Exception as e:
            process_time = time.time() - start_time
            logger.error(f"ORCID访问错误: {str(e)}")
            logger.info(f"处理失败，用时：{process_time:.2f}秒")
            return []

    def get_github_profile(self, author_name: str) -> Optional[Dict[str, Any]]:
        """Resolve the author's GitHub profile details, or None if not found.

        First searches GitHub for a matching username, then scrapes that
        user's profile page for details.

        Args:
            author_name: name used for the GitHub user search.

        Returns:
            Profile-details dict (including 'username'), or None when no
            user is found or on any failure (errors are logged, not raised).
        """
        # Fix: use the injectable self.logger (as __init__/stop do) instead
        # of the module-level logger.
        self.logger.info(f"正在处理GitHub个人数据 - 作者：{author_name}")
        start_time = time.time()

        try:
            self.logger.info(f"搜索GitHub用户: {author_name}")
            username = self.search_github_user(author_name)

            if not username:
                self.logger.warning("未找到GitHub用户")
                return None

            self.logger.info(f"找到GitHub用户名: {username}")

            profile_details = self.get_user_profile_details(username)

            process_time = time.time() - start_time
            if profile_details:
                profile_details['username'] = username
                self.logger.info(f"GitHub个人数据处理完成！用时：{process_time:.2f}秒")
                return profile_details

            self.logger.info(f"未找到有效的GitHub个人数据。用时：{process_time:.2f}秒")
            return None

        except Exception as e:
            process_time = time.time() - start_time
            self.logger.error(f"GitHub访问错误: {str(e)}")
            self.logger.info(f"处理失败，用时：{process_time:.2f}秒")
            return None

    def search_github_user(self, name: str) -> Optional[str]:
        """Scrape GitHub's user search page and return the first username hit.

        Retries up to 3 times, backing off progressively on HTTP 429 rate
        limits and waiting 30s after unexpected errors.

        Args:
            name: author name; brackets are stripped and spaces become '+'.

        Returns:
            The username (path without leading '/'), or None when nothing
            is found or retries are exhausted.
        """
        # Fix: use the injectable self.logger (as __init__/stop do) instead
        # of the module-level logger.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        # Normalize the query: strip brackets, join words with '+'
        name = name.strip('[]').replace(' ', '+')
        search_url = f'https://github.com/search?q={name}&type=users'

        max_retries = 3
        current_retry = 0

        while current_retry < max_retries:
            try:
                time.sleep(2)  # throttle before every request

                # SECURITY NOTE(review): verify=False disables TLS certificate
                # validation — remove once the underlying cert issue is fixed.
                response = self.session.get(search_url, headers=headers, verify=False)
                if response.status_code == 200:
                    soup = BeautifulSoup(response.text, 'html.parser')

                    # First candidate in the search result titles
                    user_link = soup.select_one('div.search-title a')
                    if user_link and 'href' in user_link.attrs:
                        href = str(user_link['href'])
                        if href.startswith('/'):
                            href = href[1:]
                        return href

                    # Fallback: any result-title link not under /users/
                    for link in soup.select('div.search-title a'):
                        if 'href' in link.attrs:
                            href = str(link['href'])
                            if href.startswith('/'):
                                href = href[1:]
                            if '/users/' not in href:
                                return href

                    return None

                elif response.status_code == 429:
                    wait_time = 60 * (current_retry + 1)  # back off harder each retry
                    self.logger.warning(f"访问频率限制，等待{wait_time}秒后重试... ({name})")
                    time.sleep(wait_time)
                    current_retry += 1
                    continue

                return None

            except Exception as e:
                self.logger.warning(f"搜索GitHub用户时出错: {str(e)}")
                current_retry += 1
                if current_retry < max_retries:
                    time.sleep(30)  # cool down before retrying after an error
                    continue
                return None

        self.logger.warning(f"达到最大重试次数，无法搜索GitHub用户: {name}")
        return None

    def get_user_profile_details(self, username: str) -> Optional[Dict[str, Any]]:
        """Scrape a GitHub profile page for bio, company, location and socials.

        Retries up to 3 times, backing off on HTTP 429 rate limits and
        waiting 30s after unexpected errors.

        Args:
            username: GitHub username whose profile page is fetched.

        Returns:
            Dict of profile fields (missing values default to "待补充"),
            or None when the page or profile area cannot be retrieved.
        """
        # Fix: use the injectable self.logger (as __init__/stop do) instead
        # of the module-level logger.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        max_retries = 3
        current_retry = 0

        while current_retry < max_retries:
            try:
                time.sleep(2)  # throttle before every request

                profile_url = f'https://github.com/{username}'
                # SECURITY NOTE(review): verify=False disables TLS certificate
                # validation — remove once the underlying cert issue is fixed.
                response = self.session.get(profile_url, headers=headers, verify=False)

                if response.status_code == 429:
                    wait_time = 60 * (current_retry + 1)  # back off harder each retry
                    self.logger.warning(f"访问频率限制，等待{wait_time}秒后重试... ({username})")
                    time.sleep(wait_time)
                    current_retry += 1
                    continue

                if response.status_code == 200:
                    soup = BeautifulSoup(response.text, 'html.parser')

                    # All scraped fields live in the editable profile area
                    profile_area = soup.select_one('div.js-profile-editable-area')
                    if not profile_area:
                        return None

                    profile_details = {
                        'bio': "待补充",
                        'company': "待补充",
                        'location': "待补充",
                        'email': "待补充",
                        'website': "待补充",
                        'twitter': "待补充",
                        'linkedin': "待补充",
                        'followers': "待补充",
                        'following': "待补充",
                        'organizations': [],
                        'timezone': "待补充",  # only the timezone part of local time
                        # NOTE(review): datetime.utcnow() is deprecated since
                        # Python 3.12; kept to preserve the timestamp format.
                        'crawled_at': datetime.utcnow().isoformat() + 'Z'
                    }

                    # Profile bio
                    bio_div = profile_area.select_one('div.p-note.user-profile-bio div')
                    if bio_div:
                        profile_details['bio'] = bio_div.get_text(strip=True)

                    # vCard rows: organization, location, timezone, socials
                    vcard_details = profile_area.select('ul.vcard-details li.vcard-detail')
                    for detail in vcard_details:
                        itemprop = detail.get('itemprop', '')

                        # Organization / company
                        if 'worksFor' in itemprop:
                            org_links = detail.select('a.user-mention')
                            profile_details['organizations'] = [link.get_text(strip=True) for link in org_links if link] or ["待补充"]
                            org_span = detail.select_one('span.p-org')
                            if org_span:
                                profile_details['company'] = org_span.get_text(strip=True)

                        # Location
                        elif 'homeLocation' in itemprop:
                            location_span = detail.select_one('span.p-label')
                            if location_span:
                                profile_details['location'] = location_span.get_text(strip=True)

                        # Timezone: keep only the "(...)" suffix of local time
                        elif 'localTime' in itemprop:
                            time_span = detail.select_one('span.p-label')
                            if time_span:
                                time_text = time_span.get_text(strip=True)
                                timezone = time_text.split('(')[-1].rstrip(')')
                                profile_details['timezone'] = timezone

                        # Social-media links
                        elif itemprop == 'social':
                            link = detail.select_one('a')
                            # BUG FIX: the original used hasattr(link, 'href'),
                            # which is always True for a bs4 Tag (attribute
                            # access falls back to child-tag lookup), so a
                            # missing href raised KeyError below and aborted
                            # the parse. Check the HTML attribute explicitly.
                            if link and link.has_attr('href'):
                                href = link['href']
                                if isinstance(href, str):
                                    if 'linkedin.com' in href:
                                        profile_details['linkedin'] = href
                                    elif 'twitter.com' in href:
                                        profile_details['twitter'] = href
                                    elif '@' in href:
                                        profile_details['email'] = href.replace('mailto:', '')
                                    elif href and not href.startswith('mailto:'):
                                        profile_details['website'] = href

                    # Follower / following counts
                    followers_link = profile_area.select_one('a[href$="?tab=followers"]')
                    if followers_link:
                        followers_text = followers_link.select_one('span.text-bold')
                        if followers_text:
                            profile_details['followers'] = followers_text.get_text(strip=True)

                    following_link = profile_area.select_one('a[href$="?tab=following"]')
                    if following_link:
                        following_text = following_link.select_one('span.text-bold')
                        if following_text:
                            profile_details['following'] = following_text.get_text(strip=True)

                    return profile_details

                return None

            except Exception as e:
                self.logger.warning(f"获取用户资料详情时出错: {str(e)}")
                current_retry += 1
                if current_retry < max_retries:
                    time.sleep(30)  # cool down before retrying after an error
                    continue
                return None

        self.logger.warning(f"达到最大重试次数，无法获取用户资料: {username}")
        return None

    def _replace_null_values(self, data):
        """递归替换数据中的None/null值为'待补充'"""
        if isinstance(data, dict):
            return {k: self._replace_null_values(v) for k, v in data.items()}
        elif isinstance(data, list):
            return [self._replace_null_values(item) for item in data]
        elif data is None:
            return "待补充"
        return data

    def save_crawled_data(self, author_name: str, doc_id: str, author_data: Dict, output_dir: str = '.', 
                          db_name: str = "LZQ", collection_name: str = "LIST") -> Optional[str]:
        """将爬取的数据保存到统一的JSON文件中"""
        try:
            # 动态生成结果文件名
            output_filename = f"Results_{db_name}.{collection_name}_no_arxiv.json"
            output_path = os.path.join(output_dir, output_filename)
            
            # 替换所有null值
            author_data = self._replace_null_values(author_data)
            
            # 读取现有数据（如果文件存在）
            existing_data = []
            if os.path.exists(output_path):
                with open(output_path, 'r', encoding='utf-8') as f:
                    existing_data = json.load(f)
            
            # 检查是否已存在相同的mongodb_id
            for item in existing_data:
                if item.get('metadata', {}).get('mongodb_id') == doc_id:
                    logger.warning(f"跳过：已存在相同的mongodb_id ({doc_id})")
                    return output_path
            
            # 构建新的数据项
            new_item = {
                "metadata": {
                    "created_at": {"$date": datetime.now().isoformat()},
                    "updated_at": {"$date": datetime.now().isoformat()},
                    "version": "1.0",
                    "mongodb_id": doc_id
                },
                "author_data": author_data
            }
            
            # 添加新的数据项
            existing_data.append(new_item)
            
            # 按照mongodb_id排序
            existing_data.sort(key=lambda x: x.get('metadata', {}).get('mongodb_id', ''))
            
            # 保存数据
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(existing_data, f, ensure_ascii=False, indent=2)
            logger.info(f"数据已保存到: {output_path}")
            
            return output_path
                
        except Exception as e:
            logger.error(f"保存数据时出错: {str(e)}")
            return None

    def process_json_file(self, json_file_path):
        """Crawl DBLP, ORCID and GitHub data for the author described in one JSON file.

        The input file must contain the author's English name at
        ``raw_data.english_name``; the aggregated record is appended to the
        unified result file (in the input file's directory) via
        ``save_crawled_data``.

        Args:
            json_file_path: Path of the per-author JSON input file.

        Returns:
            None. Failures are logged and swallowed so a batch run can continue.
        """
        try:
            self.logger.info(f"\n{'='*50}")
            self.logger.info(f"开始处理文件: {json_file_path}")
            self.logger.info(f"{'='*50}")

            # Load the per-author input file.
            self.logger.info("读取JSON文件...")
            with open(json_file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            # Preserve the original creation timestamp (fall back to "now").
            # processed_at/updated_at are regenerated below, so the input's
            # values for them are not needed.
            created_at = data.pop('created_at', {"$date": datetime.now().isoformat()})

            # The author name comes from raw_data; the English name is required.
            raw_data = data.get('raw_data', {})
            author_name = raw_data.get('english_name', '')
            author_chinese_name = raw_data.get('chinese_name', '')

            if not author_name:
                self.logger.error(f"错误：在文件 {json_file_path} 中未找到作者英文名")
                return

            self.logger.info(f"\n处理作者: {author_name} ({author_chinese_name})")
            self.logger.info(f"{'='*50}")

            # Collect data from each source, timing the whole run.
            self.logger.info("\n开始收集学术数据...")
            start_time = time.time()

            self.logger.info("\n获取DBLP数据...")
            dblp_data = self.get_dblp_publications(author_name)

            self.logger.info("\n获取ORCID数据...")
            orcid_data = self.get_orcid_profile(author_name)

            # Reuse GitHub data already present in the input, if any.
            existing_github_data = data.get('github_raw_data')
            if existing_github_data:
                self.logger.info("\n已存在GitHub数据，跳过GitHub爬取...")
                github_data = existing_github_data
            else:
                self.logger.info("\n获取GitHub数据...")
                github_data = self.get_github_profile(author_name)

            # Assemble the unified author record.
            author_data = {
                "name": {
                    "english": author_name,
                    "chinese": author_chinese_name
                },
                "sources": {
                    "dblp": {
                        "last_updated": datetime.now().isoformat(),
                        "publication_count": len(dblp_data),
                        "publications": dblp_data
                    },
                    "orcid": {
                        "last_updated": datetime.now().isoformat(),
                        "profile_count": len(orcid_data),
                        "profiles": orcid_data
                    },
                    "github": {
                        "last_updated": datetime.now().isoformat(),
                        "profile": github_data
                    }
                },
                "metadata": {
                    "created_at": created_at,
                    "updated_at": {"$date": datetime.now().isoformat()},
                    "processed_at": {"$date": datetime.now().isoformat()}
                }
            }

            # Save the result next to the input file.
            output_dir = os.path.dirname(json_file_path)
            if not output_dir:
                output_dir = '.'

            self.save_crawled_data(author_name, "", author_data, output_dir)

            process_time = time.time() - start_time
            self.logger.info(f"\n处理完成！用时：{process_time:.2f}秒")

        except Exception as e:
            self.logger.error(f"处理文件时出错: {str(e)}")
            return None

    def process_all_json_files(self, directory='.'):
        """Run process_json_file over every *.json file in *directory*,
        pausing briefly between files to space out the outgoing requests."""
        json_files = glob.glob(os.path.join(directory, '*.json'))
        total_files = len(json_files)

        logger.info(f"\n开始处理目录: {directory}")
        logger.info(f"找到 {total_files} 个JSON文件待处理")
        logger.info(f"{'='*50}")

        index = 0
        for json_file in json_files:
            index += 1
            logger.info(f"\n处理第 {index}/{total_files} 个文件")
            self.process_json_file(json_file)
            # Sleep only between files, not after the last one.
            if index != total_files:
                delay = random.uniform(0, 1)
                logger.info(f"等待 {delay:.1f} 秒后处理下一个文件...")
                time.sleep(delay)

        logger.info(f"\n所有文件处理完成！")
        logger.info(f"{'='*50}\n")

    def _get_mongodb_connection(self):
        """Build a MongoClient for the crawler's result database.

        Connection settings are read from the environment (populated from .env
        by ``load_dotenv`` in ``__init__``) so credentials no longer have to
        live in source control; the previous hard-coded values remain as
        defaults for backward compatibility.

        Returns:
            A pymongo.MongoClient connected with directConnection=true.
        """
        username = os.getenv("MONGODB_USERNAME", "root")
        password = os.getenv("MONGODB_PASSWORD", "pj9gbcht")
        host = os.getenv("MONGODB_HOST", "dbconn.sealosbja.site")
        port = int(os.getenv("MONGODB_PORT", "31140"))

        # Percent-encode credentials as required by the MongoDB URI format
        # (pymongo rejects unescaped reserved characters in user/password).
        uri = (
            f"mongodb://{quote(username, safe='')}:{quote(password, safe='')}"
            f"@{host}:{port}/?directConnection=true"
        )
        return MongoClient(uri)

    def process_mongodb_documents(self, start_index: int = 1, stop_check=None, 
                                 db_name: str = "LZQ", collection_name: str = "LIST"):
        """Process author documents stored in MongoDB.

        Args:
            start_index (int): 1-based index of the first document to process.
            stop_check (callable): Optional callback; processing stops when it
                returns True (checked before each document).
            db_name (str): MongoDB database name.
            collection_name (str): MongoDB collection name.
        """
        # Bind client before the try block so the finally clause never sees an
        # unbound name when _get_mongodb_connection() itself raises.
        client = None
        try:
            self.logger.info(f"\n开始处理MongoDB数据：数据库 '{db_name}', 集合 '{collection_name}'...")
            client = self._get_mongodb_connection()
            db = client[db_name]
            collection = db[collection_name]
            
            # Materialize the cursor so the total count is known up front.
            cursor = collection.find({})
            documents = list(cursor)
            total_docs = len(documents)
            
            # Initialize the progress state consumed by update_status().
            self.total_count = total_docs
            self.processed_count = start_index - 1  # documents before the starting index
            self.start_time = datetime.now()
            self.is_running = True
            self.stop_requested = False
            
            self.logger.info(f"找到 {total_docs} 个文档待处理")
            self.logger.info(f"将从第 {start_index} 个文档开始处理")
            self.logger.info(f"{'='*50}")
            self.update_status()
            
            # Load the existing result file so already-saved documents can be
            # skipped. The file name is derived from db_name/collection_name,
            # matching save_crawled_data (previously hard-coded to "LZQ.LIST",
            # which broke de-duplication for any other collection).
            output_path = os.path.join('.', f"Results_{db_name}.{collection_name}_no_arxiv.json")
            existing_data = []
            if os.path.exists(output_path):
                with open(output_path, 'r', encoding='utf-8') as f:
                    existing_data = json.load(f)
            
            # Set of already-saved mongodb_ids for O(1) duplicate checks.
            existing_ids = {item.get('metadata', {}).get('mongodb_id') for item in existing_data}
            
            success_count = 0  # documents actually crawled and saved in this run
            for index, doc in enumerate(documents[start_index-1:], start_index):
                # Honor both the internal flag and the external stop callback.
                if self.stop_requested or (stop_check and stop_check()):
                    self.logger.info(f"\n收到停止信号，停止处理")
                    self.logger.info(f"已处理 {success_count} 个文档")
                    break
                
                try:
                    raw_data = doc.get('raw_data', {})
                    structed_data = doc.get('structed_data', {})

                    # Resolve the author name: prefer the English name from
                    # structed_data, then the Chinese name, then raw_data['姓名'].
                    english_name = ""
                    chinese_name = ""
                    if isinstance(structed_data, dict):
                        name_data = structed_data.get('name', {})
                        if isinstance(name_data, dict):
                            english_name = name_data.get('english', '')
                            chinese_name = name_data.get('chinese', '')

                    if english_name:
                        author_name = english_name
                    elif chinese_name:
                        author_name = chinese_name
                    else:
                        author_name = raw_data.get('姓名', 'Unknown')

                    # Publish progress before the (slow) crawling starts.
                    self.processed_count = index
                    self.current_author = author_name
                    self.update_status()

                    self.logger.info(f"\n检查第 {index}/{total_docs} 个文档")
                    self.logger.info(f"作者姓名: {author_name}")

                    # Normalize the MongoDB _id to a plain string.
                    doc_id = doc.get('_id', {})
                    if isinstance(doc_id, dict):
                        doc_id = doc_id.get('$oid', '')
                    else:
                        doc_id = str(doc_id)
                    # Skip documents whose results were already saved.
                    if doc_id in existing_ids:
                        self.logger.warning(f"跳过：文档ID {doc_id} 已存在于结果文件中")
                        continue
                    
                    if not author_name or author_name == 'Unknown':
                        self.logger.warning("跳过：未找到作者姓名")
                        continue
                        
                    self.logger.info(f"\n处理作者: {author_name}")
                    self.logger.info(f"文档ID: {doc_id}")
                    self.logger.info(f"{'='*50}")
                    
                    # Collect data from each source.
                    dblp_data = self.get_dblp_publications(author_name)
                    orcid_data = self.get_orcid_profile(author_name)
                    
                    # Reuse GitHub data already stored on the document, if any.
                    existing_github_data = doc.get('github_raw_data')
                    if existing_github_data:
                        self.logger.info("\n已存在GitHub数据，跳过GitHub爬取...")
                        github_data = existing_github_data
                    else:
                        self.logger.info("\n获取GitHub数据...")
                        github_data = self.get_github_profile(author_name)
                    
                    # Assemble the per-author record to persist.
                    author_data = {
                        "name": author_name,
                        "sources": {
                            "dblp": {
                                "last_updated": datetime.now().isoformat(),
                                "publication_count": len(dblp_data),
                                "publications": dblp_data
                            },
                            "orcid": {
                                "last_updated": datetime.now().isoformat(),
                                "profile_count": len(orcid_data),
                                "profiles": orcid_data
                            },
                            "github": {
                                "last_updated": datetime.now().isoformat(),
                                "profile": github_data
                            }
                        },
                        "metadata": {
                            "processed_at": {"$date": datetime.now().isoformat()},
                            "created_at": {"$date": datetime.now().isoformat()},
                            "updated_at": {"$date": datetime.now().isoformat()},
                            "mongodb_id": doc_id
                        }
                    }
                    
                    self.save_crawled_data(author_name, doc_id, author_data, db_name=db_name, collection_name=collection_name)  
                    existing_ids.add(doc_id)  # avoid re-processing within this run
                    success_count += 1
                    
                    self.logger.info(f"作者 {author_name} 处理完成")
                    
                    # Small random delay between documents to avoid hammering
                    # the upstream sources.
                    if index < total_docs:
                        delay = random.uniform(0.5, 2.0)
                        self.logger.info(f"等待 {delay:.1f} 秒后处理下一个文档...")
                        time.sleep(delay)
                        
                except Exception as e:
                    # One failing document must not abort the whole batch.
                    self.logger.error(f"处理文档时出错: {str(e)}")
                    continue
                    
            self.logger.info(f"\n处理完成! 共处理 {success_count} 个文档")
            
        except Exception as e:
            self.logger.error(f"MongoDB处理错误: {str(e)}")
        finally:
            # Always publish the final state and release the connection.
            self.is_running = False
            self.update_status()
            
            if client is not None:
                client.close()
                self.logger.info("MongoDB连接已关闭")

def main():
    """Entry point: crawl every document in the configured MongoDB collection."""
    crawler = ScholarCrawler()

    try:
        # Database and collection come from the environment, with the same
        # defaults used elsewhere in this module.
        target_db = os.getenv("DB_NAME", "LZQ")
        target_collection = os.getenv("COLLECTION_NAME", "LIST")

        crawler.process_mongodb_documents(
            start_index=1,
            db_name=target_db,
            collection_name=target_collection,
        )

    except Exception as e:
        logger.error(f"程序执行出错: {str(e)}")

# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()