import re
from bs4 import BeautifulSoup
from typing import Dict, List, Any, Optional
from datetime import datetime
import time
import random
from .base_crawler import BaseCrawler

class OpenAIBlogCrawler(BaseCrawler):
    """
    Crawler for the OpenAI blog; scrapes the latest OpenAI company news.

    Each entry on a listing page yields a metadata dict, optionally
    enriched with the full article body fetched from the entry's
    detail page.
    """

    # Origin used to resolve relative article/image URLs found on the page.
    BASE_URL = 'https://openai.com'

    # Date formats observed on OpenAI blog pages, tried in order.
    DATE_FORMATS = ('%B %d, %Y', '%Y-%m-%d', '%d %B %Y', '%m/%d/%Y')

    def __init__(self, source_config: Dict[str, Any]):
        """
        Initialize the OpenAI blog crawler.

        Args:
            source_config: Data-source configuration forwarded to BaseCrawler.
        """
        super().__init__(source_config)

    def extract_data(self, soup: BeautifulSoup) -> List[Dict[str, Any]]:
        """
        Extract article data from a parsed OpenAI blog listing page.

        Args:
            soup: Parsed listing page.

        Returns:
            One dict per article entry. Entries that fail to parse are
            logged and skipped so a single bad card cannot abort the crawl.
        """
        results: List[Dict[str, Any]] = []

        # Prefer semantic <article> tags; fall back to common card classes.
        article_elements = soup.find_all('article')
        if not article_elements:
            article_elements = soup.select('.post-card, .blog-post, .blog-entry')

        for article in article_elements:
            try:
                results.append(self._parse_article(article))
            except Exception as e:
                # Best-effort: log and continue with the next entry.
                self.logger.error(f"Error parsing article entry: {str(e)}")

        return results

    def _parse_article(self, article) -> Dict[str, Any]:
        """Build the metadata dict for a single listing entry."""
        title, title_element = self._extract_title(article)
        url = self._extract_url(article, title_element)

        article_data = {
            'type': 'company_news',
            'id': self._article_id_from_url(url),
            'title': title,
            'summary': self._extract_summary(article),
            'url': url,
            'published_date': self._extract_date(article),
            'author': self._extract_author(article),
            'image_url': self._extract_image_url(article),
            'company': 'OpenAI'
        }

        # If the entry links to a detail page, merge in the full content.
        # Detail-fetch failures are logged but do not invalidate the entry.
        if url:
            try:
                details = self._get_article_details(url)
                if details:
                    article_data.update(details)
            except Exception as e:
                self.logger.error(f"Error fetching details for {url}: {str(e)}")

        return article_data

    def _extract_title(self, article):
        """Return (title text, heading element); the element may be None."""
        title_element = article.find(['h1', 'h2', 'h3'], class_=re.compile(r'title|heading', re.I))
        if not title_element:
            title_element = article.find(['h1', 'h2', 'h3'])
        title = title_element.text.strip() if title_element else "无标题"
        return title, title_element

    def _extract_url(self, article, title_element) -> str:
        """Resolve the entry's absolute URL, or '' when no link is found."""
        link_element = title_element.find('a') if title_element else None
        if not link_element and title_element:
            # The heading itself may be the anchor.
            link_element = title_element if title_element.name == 'a' else None

        if not link_element:
            # Last resort: any link in the card pointing at blog content.
            link_element = article.find('a', href=re.compile(r'/blog|/research|/news'))

        url = ""
        if link_element and 'href' in link_element.attrs:
            url = link_element['href']
            # Turn a site-relative path into an absolute URL.
            if url.startswith('/'):
                url = self.BASE_URL + url
        return url

    def _extract_date(self, article) -> str:
        """Return the publish date as an ISO-8601 string; falls back to crawl time."""
        date_element = article.find(['time', 'span', 'div'], class_=re.compile(r'date|time|pub', re.I))
        date_str = date_element.text.strip() if date_element else ""
        date_str = date_str.replace('Published:', '').replace('Posted:', '').strip()

        for date_format in self.DATE_FORMATS:
            try:
                return datetime.strptime(date_str, date_format).isoformat()
            except ValueError:
                # Wrong format for this pattern; try the next one.
                continue

        # No recognizable date on the card: stamp with the current time.
        return datetime.now().isoformat()

    def _extract_summary(self, article) -> str:
        """Return the entry's summary text (first paragraph as fallback)."""
        summary_element = article.find(['p', 'div'], class_=re.compile(r'summary|excerpt|description', re.I))
        if not summary_element:
            summary_element = article.find('p')
        return summary_element.text.strip() if summary_element else ""

    def _extract_author(self, article) -> str:
        """Return the byline author with 'By'/'by' prefixes removed; defaults to 'OpenAI'."""
        author_element = article.find(['span', 'div', 'a'], class_=re.compile(r'author|byline', re.I))
        author = author_element.text.strip() if author_element else "OpenAI"
        return author.replace('By', '').replace('by', '').strip()

    @staticmethod
    def _article_id_from_url(url: str) -> str:
        """Derive an article id from the last URL path segment ('' when no URL)."""
        if not url:
            return ""
        return url.rstrip('/').split('/')[-1]

    def _extract_image_url(self, article) -> str:
        """Return the cover-image URL as an absolute URL, or '' when absent."""
        image_url = ""
        img_element = article.find('img')
        if img_element and 'src' in img_element.attrs:
            image_url = img_element['src']
            # Turn a site-relative path into an absolute URL.
            if image_url.startswith('/'):
                image_url = self.BASE_URL + image_url
        return image_url

    def _get_article_details(self, url: str) -> Optional[Dict[str, Any]]:
        """
        Fetch an article's detail page and extract its full content.

        Args:
            url: Absolute article URL.

        Returns:
            Dict with 'content', 'tags' and 'word_count', or None when the
            page cannot be fetched or parsed.
        """
        try:
            # Random delay so detail requests don't hammer the server.
            time.sleep(random.uniform(1, 3))
            soup = self.fetch_page(url)
            if not soup:
                return None

            # Locate the main body container, with generic fallbacks.
            content_element = soup.find(['div', 'article'], class_=re.compile(r'content|post-content|article-body', re.I))
            if not content_element:
                content_element = soup.find('article') or soup.find('main')

            content = ""
            if content_element:
                # Strip chrome/boilerplate before collecting text.
                for elem in content_element.find_all(['script', 'style', 'nav', 'header', 'footer']):
                    elem.extract()

                paragraphs = content_element.find_all('p')
                content = "\n\n".join(p.text.strip() for p in paragraphs)

            # Collect unique tag labels, preserving first-seen order.
            tags: List[str] = []
            for tag in soup.find_all(['a', 'span'], class_=re.compile(r'tag', re.I)):
                tag_text = tag.text.strip()
                if tag_text and tag_text not in tags:
                    tags.append(tag_text)

            return {
                'content': content,
                'tags': tags,
                'word_count': len(content.split())
            }

        except Exception as e:
            self.logger.error(f"Error fetching article details: {str(e)}")
            return None