import json
import requests
from bs4 import BeautifulSoup
import time
import random
import os
import re
from datetime import datetime
from typing import Dict, Any, Optional, List, Tuple
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import logging
import threading

# Configure module-wide logging: INFO level, mirrored to the console (stderr)
# and to a persistent log file next to the script.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler("dblp_crawler.log")
    ]
)
logger = logging.getLogger(__name__)

class DBLPCrawler:
    def __init__(self, status_callback=None, logger=None):
        """Set up crawler state, request headers and a retrying HTTP session.

        Args:
            status_callback: optional callable invoked with a status dict on
                every state change (see ``update_status``).
            logger: optional logger; falls back to this module's logger.
        """
        self.status_callback = status_callback
        self.logger = logger or logging.getLogger(__name__)

        # Run-state and progress-tracking fields
        self.is_running = False
        self.stop_requested = False
        self.processed_count = 0
        self.success_count = 0
        self.total_count = 0
        self.current_title = ""
        self.start_time = None

        # Browser-like headers to avoid trivial bot detection on DBLP
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1'
        }

        self.session = self._create_session()
        self.logger.info("DBLP爬虫初始化完成")
    
    def _create_session(self):
        """Build a requests session that retries transient failures automatically."""
        retry_policy = Retry(
            total=5,
            backoff_factor=1.0,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["GET", "POST"]
        )
        adapter = HTTPAdapter(max_retries=retry_policy)

        session = requests.Session()
        # Install the same retrying adapter for both schemes
        for scheme in ('http://', 'https://'):
            session.mount(scheme, adapter)
        session.headers.update(self.headers)
        return session

    def update_status(self, **kwargs):
        """Apply keyword state updates and notify the status callback.

        Only keys matching existing attributes are applied; unknown keys are
        silently ignored. When a callback is registered it receives a snapshot
        dict describing the crawler's current progress.
        """
        for attr, value in kwargs.items():
            if hasattr(self, attr):
                setattr(self, attr, value)

        if not self.status_callback:
            return

        processed = self.processed_count
        successes = getattr(self, 'success_count', 0)
        # Success rate as a percentage, 0 until anything has been processed
        if processed > 0 and hasattr(self, 'success_count'):
            rate = round((successes / processed) * 100, 2)
        else:
            rate = 0

        self.status_callback({
            'crawler_type': 'dblp',
            'is_running': self.is_running,
            'processed_count': processed,
            'total_count': self.total_count,
            'success_count': successes,
            'success_rate': rate,
            # current_title doubles as the "current author" field expected downstream
            'current_author': self.current_title,
            'start_time': self.start_time.isoformat() if self.start_time else None,
            'status': 'running' if self.is_running else 'idle'
        })

    def stop(self):
        """停止爬虫"""
        self.stop_requested = True
        self.is_running = False
        self.logger.info("爬虫停止请求已接收")
        self.update_status()

    def get_authors_from_paper_url(self, paper_url: str, stop_check=None) -> Tuple[List[Dict[str, str]], Optional[Exception]]:
        """
        Extract all author records from a DBLP paper page, with retries.

        Args:
            paper_url: URL of the DBLP publication page to scrape.
            stop_check: optional callable returning True when processing should
                be aborted; polled before every blocking step so a stop request
                is honored quickly.

        Returns:
            (authors, error): list of {'name': str, 'dblp_url': str} dicts and
            the exception that caused failure — None on success, on a stop
            request, or when all retries ended in the rate-limit branch.
        """
        self.logger.info(f"处理论文URL: {paper_url}")
        start_time = time.time()
        
        retries = 5
        error = None
        
        for attempt in range(retries):
            # Honor stop requests before starting each retry attempt
            if self.stop_requested or (stop_check and stop_check()):
                self.logger.info("爬虫已停止，中断处理")
                return [], None
                
            self.logger.info(f"尝试 {attempt+1}/{retries}: {paper_url}")
            
            try:
                # Random delay between requests to reduce the chance of being blocked
                delay = random.uniform(1.5, 3.5)
                self.logger.info(f"等待 {delay:.1f} 秒...")
                
                # Sleep in short slices so a stop request interrupts the delay
                wait_interval = 0.5  # poll the stop flag every 0.5 seconds
                remaining_delay = delay
                while remaining_delay > 0:
                    if self.stop_requested or (stop_check and stop_check()):
                        self.logger.info("爬虫已停止，中断等待")
                        return [], None
                    
                    sleep_time = min(wait_interval, remaining_delay)
                    time.sleep(sleep_time)
                    remaining_delay -= sleep_time
                
                # Final stop check before actually issuing the HTTP request
                if self.stop_requested or (stop_check and stop_check()):
                    self.logger.info("爬虫已停止，中断处理")
                    return [], None
                    
                self.logger.info(f"发送请求: 获取论文页面")
                response = self.session.get(paper_url, timeout=20)
                
                # Rate limited (HTTP 429): wait as instructed, then retry the attempt
                if response.status_code == 429:
                    retry_after = int(response.headers.get('Retry-After', 15))
                    total_wait = retry_after + random.uniform(1, 3)
                    self.logger.warning(f"触发速率限制，等待 {total_wait:.1f} 秒")
                    
                    # Again sleep in slices so a stop request interrupts the wait
                    wait_interval = 1.0  # poll the stop flag every second
                    remaining_wait = total_wait
                    while remaining_wait > 0:
                        if self.stop_requested or (stop_check and stop_check()):
                            self.logger.info("爬虫已停止，中断速率限制等待")
                            return [], None
                        
                        sleep_time = min(wait_interval, remaining_wait)
                        time.sleep(sleep_time)
                        remaining_wait -= sleep_time
                    
                    continue
                
                response.raise_for_status()
                
                soup = BeautifulSoup(response.text, 'html.parser')
                
                # Grab the page title (first <h1>) — used for logging only
                page_title = ""
                title_tag = soup.find('h1')
                if title_tag:
                    title_text = title_tag.get_text()
                    if title_text:
                        page_title = title_text.strip()[:80]
                
                self.logger.info(f"页面标题: {page_title}")
                
                authors = []
                # Primary strategy: elements carrying itemprop="author" microdata
                author_elements = soup.find_all('li', {'itemprop': 'author'}) or soup.find_all('span', {'itemprop': 'author'})
                
                # Fallback strategy when no microdata markup is present
                if not author_elements:
                    self.logger.info("未找到带itemprop的作者元素，尝试备用方法")
                    author_elements = soup.select('.author a[href^="/pid/"]') or soup.select('.authors a[href]')
                
                self.logger.info(f"找到 {len(author_elements)} 个作者元素")
                
                for i, author in enumerate(author_elements):
                    # Honor stop requests even part-way through the author list
                    if self.stop_requested or (stop_check and stop_check()):
                        self.logger.info("爬虫已停止，中断处理")
                        return [], None
                        
                    name = ""
                    dblp_url = ""
                    
                    # Microdata layout: <li>/<span> wrapping <a itemprop="url"><span itemprop="name">
                    if author.name == 'li' or author.name == 'span':
                        author_link = author.find('a', {'itemprop': 'url'})
                        if author_link:
                            name_span = author_link.find('span', {'itemprop': 'name'})
                            if name_span:
                                # Prefer the title attribute (full name) over visible text
                                name = name_span.get('title', name_span.text.strip())
                                dblp_url = author_link.get('href', '')
                    
                    # Fallback layout: the matched element itself is the author <a> link
                    if not name and author.name == 'a':
                        name = author.get_text(strip=True)
                        dblp_url = author.get('href', '')
                    
                    # Last resort: take any text content of the element
                    if not name:
                        name = author.get_text(strip=True)
                    
                    # Turn relative hrefs into absolute DBLP URLs
                    if dblp_url and not dblp_url.startswith('http'):
                        dblp_url = f"https://dblp.org{dblp_url}"
                    
                    # Discard anything that is not a dblp.org URL
                    if not dblp_url.startswith('https://dblp.org/'):
                        dblp_url = ""
                    
                    if name:
                        self.logger.info(f"  [{i+1}] 找到作者: {name}")
                        authors.append({
                            'name': name,
                            'dblp_url': dblp_url
                        })
                
                process_time = time.time() - start_time
                self.logger.info(f"论文处理完成! 用时: {process_time:.2f}秒, 找到 {len(authors)} 位作者")
                return authors, None
                
            except requests.exceptions.RequestException as e:
                error = e
                self.logger.error(f"请求失败 (尝试 {attempt + 1}/{retries}): {str(e)}")
                if attempt < retries - 1:
                    # Linear backoff between network retries: 8s, 16s, 24s, ...
                    wait_time = (attempt + 1) * 8
                    self.logger.info(f"等待 {wait_time} 秒后重试...")
                    time.sleep(wait_time)
                else:
                    self.logger.error(f"达到最大重试次数，处理失败: {paper_url}")
                    return [], error
            except Exception as e:
                error = e
                self.logger.error(f"处理论文时出错: {str(e)}", exc_info=True)
                return [], error
        
        # Reached only when every attempt took the 429 `continue` branch;
        # NOTE(review): error is still None here, so persistent rate limiting
        # is reported as success-with-no-authors by callers — confirm intended.
        return [], error

    def save_crawled_data(self, data: List[Dict], output_file: str) -> str:
        try:
            # 直接保存数据，不添加元数据
            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
            
            self.logger.info(f"数据已保存到: {output_file} (总记录数: {len(data)})")
            return output_file
            
        except Exception as e:
            self.logger.error(f"保存数据失败: {str(e)}", exc_info=True)
            return ""

    def _normalize_title(self, title: str, max_len: int = 100) -> str:
        """Normalize a paper title for display and logging.

        Replaces runs of non-ASCII characters with a space, collapses all
        whitespace (spaces, newlines, tabs) into single spaces, trims the
        ends, and truncates overly long titles.

        Args:
            title: raw title string; may be empty or None.
            max_len: maximum length kept before appending "..." (defaults to
                100, the previously hard-coded limit, so existing callers are
                unaffected).

        Returns:
            The cleaned title, with "..." appended when it was truncated.
        """
        if not title:
            return ""

        # Replace non-ASCII runs with a space, then collapse all whitespace.
        # The \s+ collapse already covers \n, \r and \t, so the old explicit
        # replace() chain was dead code and has been removed.
        title = re.sub(r'[^\x00-\x7F]+', ' ', title)
        title = re.sub(r'\s+', ' ', title).strip()

        return title[:max_len] + "..." if len(title) > max_len else title

    def update_author_urls(self, input_file: str, output_file: str = None, 
                          db_name: str = "LZQ", collection_name: str = "LIST", stop_check=None):
        """Enrich every publication in the input JSON with extracted author data.

        Loads the author/publication structure from ``input_file``, scrapes
        each publication's DBLP page for its author list, and periodically
        writes the enriched structure to ``output_file``. The run is
        resumable: publications that already carry an ``extracted_authors``
        key in an existing output file are skipped.

        Args:
            input_file (str): path to the input JSON file
            output_file (str, optional): output path; auto-generated from
                db_name/collection_name when omitted
            db_name (str): MongoDB database name (used only for the file name)
            collection_name (str): MongoDB collection name (used only for the file name)
            stop_check (callable, optional): returns True to request a stop
        """
        self.input_file = input_file
        
        # Auto-generate the output file name when none was supplied
        if output_file is None:
            output_file = f"Results_{db_name}.{collection_name}_dblp_only.json"
        
        try:
            self.logger.info(f"开始处理文件: {input_file}")
            self.logger.info(f"输出文件: {output_file}")
            self.logger.info(f"数据库: {db_name}, 集合: {collection_name}")
            
            # Load the input data
            if not os.path.exists(input_file):
                self.logger.error(f"输入文件不存在: {input_file}")
                return
            
            with open(input_file, 'r', encoding='utf-8') as f:
                input_data = json.load(f)
            
            # Resume support: collect URLs of papers already processed
            processed_papers = set()
            existing_data = input_data.copy()  # start from the input data (shallow copy)
            
            # Reuse a previous output file when it exists and is non-empty
            if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
                try:
                    with open(output_file, 'r', encoding='utf-8') as f:
                        existing_data = json.load(f)
                        # A paper counts as processed once it has 'extracted_authors'.
                        # NOTE(review): that key is set even when extraction failed,
                        # so failed papers are NOT retried on resume — confirm intended.
                        for author in existing_data:
                            dblp_source = author.get('author_data', {}).get('sources', {}).get('dblp', {})
                            for pub in dblp_source.get('publications', []):
                                if 'extracted_authors' in pub:
                                    processed_papers.add(pub['url'])
                    self.logger.info(f"发现已有输出文件，已处理 {len(processed_papers)} 篇论文")
                except (json.JSONDecodeError, Exception) as e:
                    self.logger.error(f"读取已有输出文件失败: {str(e)}，将创建新文件")
                    existing_data = input_data  # fall back to processing from scratch
            else:
                self.logger.info(f"输出文件不存在或为空，将从零开始处理")
                existing_data = input_data
                
            # Count how many papers still need processing
            total_papers = 0
            for author in existing_data:
                dblp_source = author.get('author_data', {}).get('sources', {}).get('dblp', {})
                for pub in dblp_source.get('publications', []):
                    if pub['url'] not in processed_papers:
                        total_papers += 1
            
            if total_papers == 0:
                self.logger.info("所有论文已处理完成，无需再次处理")
                return
            
            self.logger.info(f"需要处理 {total_papers} 篇新论文")
            
            # Initialize progress state for status reporting
            self.total_count = total_papers
            self.processed_count = 0
            self.success_count = 0
            self.start_time = datetime.now()
            self.is_running = True
            self.stop_requested = False
            self.update_status()  # push the initial status snapshot
            
            # Walk every author's publication list
            processed_count = 0  # local counter that drives the periodic saves
            for author_idx, author in enumerate(existing_data):
                # Honor stop requests before each author
                if self.stop_requested or (stop_check and stop_check()):
                    self.logger.info("收到停止请求，中断处理")
                    break
                    
                dblp_source = author.get('author_data', {}).get('sources', {}).get('dblp', {})
                publications = dblp_source.get('publications', [])
                
                for pub_idx, pub in enumerate(publications):
                    # Skip papers already handled in a previous run
                    if pub['url'] in processed_papers:
                        continue
                    
                    # Honor stop requests before each paper
                    if self.stop_requested or (stop_check and stop_check()):
                        self.logger.info("收到停止请求，中断处理")
                        break
                    
                    # Update progress counters and the displayed title
                    self.processed_count += 1
                    title = self._normalize_title(pub.get('title', ''))
                    self.current_title = f"{title[:60]}..." if title else "Unknown Title"
                    self.update_status()  # push the updated status
                    
                    self.logger.info(f"处理论文 {self.processed_count}/{self.total_count}: {self.current_title}")
                    self.logger.info(f"  URL: {pub['url']}")
                    
                    # Fetch and parse the paper's author list from its DBLP page
                    authors, error = self.get_authors_from_paper_url(pub['url'], stop_check)
                    
                    # Attach extraction results to the publication record
                    pub['extracted_authors'] = authors
                    if error:
                        pub['extraction_error'] = str(error)
                    
                    # Build the authors field: dicts with the URL only when present
                    formatted_authors = []
                    for auth in authors:
                        author_dict = {
                            "author_name": auth['name']
                        }
                        if auth.get('dblp_url'):
                            author_dict["author_url"] = auth['dblp_url']
                        formatted_authors.append(author_dict)
                    
                    # Overwrite the publication's authors field with the new data
                    pub['authors'] = formatted_authors
                    
                    # Log the outcome
                    author_count = len(authors)
                    status = "成功" if error is None else f"失败: {error}"
                    self.logger.info(f"  结果: {status}, 找到 {author_count} 位作者")
                    
                    # Log up to the first three authors as a sample
                    if author_count > 0:
                        sample_authors = formatted_authors[:3]
                        sample_str = ", ".join([f"{a['author_name']} ({a.get('author_url', '无URL')})" for a in sample_authors])
                        self.logger.info(f"  示例作者: {sample_str}... (共 {author_count} 位)")
                    
                    # A paper without an error counts as a success (even with 0 authors)
                    if error is None:
                        self.success_count += 1
                    
                    processed_count += 1
                    
                    # Persist intermediate results every 10 papers
                    if processed_count % 10 == 0:
                        # Skip the save if a stop was requested in the meantime
                        if self.stop_requested or (stop_check and stop_check()):
                            self.logger.info("收到停止请求，中断处理")
                            break
                        
                        self.save_crawled_data(existing_data, output_file)
                        self.logger.info(f"保存进度 ({self.processed_count}/{self.total_count})")
                
                # Propagate an inner-loop stop to the outer loop
                if self.stop_requested or (stop_check and stop_check()):
                    break
            
            # Final save — skipped when stopped (periodic saves hold the progress)
            if not (self.stop_requested or (stop_check and stop_check())):
                self.save_crawled_data(existing_data, output_file)
                self.logger.info(f"DBLP爬虫处理完成! 总共处理 {self.processed_count} 篇论文")
            else:
                self.logger.info(f"DBLP爬虫被停止! 总共处理 {self.processed_count} 篇论文")
            
            # Mark the crawler idle and broadcast the final status
            self.is_running = False
            self.update_status()
            
        except Exception as e:
            self.logger.error(f"处理论文URL时出错: {str(e)}", exc_info=True)
            self.is_running = False
            self.update_status()
            # Best effort: save whatever was processed before the failure
            try:
                self.save_crawled_data(existing_data, output_file)
                self.logger.info("已保存部分处理结果")
            except Exception as save_error:
                self.logger.error(f"保存部分结果失败: {str(save_error)}")
    # Interface adapter methods used by app.py
    def process_mongodb_documents(self, start_index=1, stop_check=None, 
                                 db_name: str = "LZQ", collection_name: str = "LIST"):
        """适配app.py的接口方法"""
        # 对于DBLP爬虫，start_index参数实际上不使用
        # 我们使用固定的输入文件路径
        input_file = "Results_LZQ.LIST_no_arxiv.json"
        output_file = f"Results_{db_name}.{collection_name}_dblp_only.json"
        
        # 设置停止检查函数
        self.stop_check = stop_check or (lambda: self.stop_requested)
        
        # 启动处理任务
        self.update_author_urls(
            input_file=input_file,
            output_file=output_file,
            db_name=db_name,
            collection_name=collection_name,
            stop_check=stop_check
        )

def main():
    """CLI entry point: read configuration from the environment and run the crawler."""
    # Configuration comes from environment variables, with sensible defaults
    db_name = os.getenv("DB_NAME", "LZQ")
    collection_name = os.getenv("COLLECTION_NAME", "LIST")
    input_file = os.getenv("INPUT_FILE", "Results_LZQ.LIST_no_arxiv.json")
    output_file = os.getenv(
        "OUTPUT_FILE", f"Results_{db_name}.{collection_name}_dblp_only.json"
    )

    crawler = DBLPCrawler()
    crawler.update_author_urls(
        input_file=input_file,
        output_file=output_file,
        db_name=db_name,
        collection_name=collection_name
    )

# Run the crawler only when executed as a script, not on import
if __name__ == "__main__":
    main()