import asyncio
import aiohttp
import requests
from bs4 import BeautifulSoup
try:
    from newspaper import Article
except ImportError:
    Article = None

try:
    from fake_useragent import UserAgent
except ImportError:
    UserAgent = None
from urllib.parse import urljoin, urlparse
import time
import logging
from typing import Optional, Dict, Any
from datetime import datetime
from models.article import ArticleModel, CrawlResult
from config.settings import settings

# Module-level logger, one per module per the stdlib logging convention.
logger = logging.getLogger(__name__)

class WebScraper:
    """Fetches web pages and extracts article content.

    Extraction strategy: try newspaper3k first (if installed); fall back to
    a BeautifulSoup heuristic when newspaper is unavailable or yields less
    than ``settings.MIN_CONTENT_LENGTH`` characters of body text.
    """

    def __init__(self):
        # fake_useragent is an optional dependency: the import guard at the
        # top of the file may have left UserAgent = None, so guard both the
        # setting and the availability of the library.
        if settings.USE_RANDOM_USER_AGENT and UserAgent is not None:
            self.ua = UserAgent()
        else:
            self.ua = None
        self.session = requests.Session()
        self.setup_session()

    def setup_session(self):
        """Install default request headers on the shared session."""
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        }
        # Reuse the single source of truth for the UA (random or fallback).
        headers['User-Agent'] = self.get_random_user_agent()
        self.session.headers.update(headers)

    def get_random_user_agent(self) -> str:
        """Return a random User-Agent string, or a static fallback."""
        if self.ua:
            return self.ua.random
        return 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'

    async def scrape_article(self, url: str, industry_keyword: str) -> "CrawlResult":
        """Crawl a single article.

        Args:
            url: Article URL.
            industry_keyword: Industry keyword this crawl is associated with.

        Returns:
            CrawlResult: success flag plus either the parsed article or an
            error description. Never raises; all failures are captured.
        """
        try:
            # Step 1: fetch the raw page HTML.
            html_content = await self._fetch_html(url)
            if not html_content:
                return CrawlResult(
                    success=False,
                    error="无法获取页面内容",
                    url=url
                )

            # Step 2: extract the body text with newspaper3k.
            article_data = self._extract_content_with_newspaper(url, html_content)

            # Step 3: fall back to BeautifulSoup when newspaper found too little.
            if not article_data.get('content') or len(article_data['content']) < settings.MIN_CONTENT_LENGTH:
                logger.warning(f"newspaper提取失败，使用BeautifulSoup备用方案: {url}")
                article_data = self._extract_content_with_bs4(html_content, url)

            # Step 4: validate the extracted content.
            if not self._validate_content(article_data):
                return CrawlResult(
                    success=False,
                    error="内容验证失败或内容过短",
                    url=url
                )

            # Step 5: build the article model.
            article = self._create_article_model(article_data, url, industry_keyword)

            return CrawlResult(
                success=True,
                article=article,
                url=url
            )

        except Exception as e:
            logger.error(f"爬取文章失败 {url}: {e}")
            return CrawlResult(
                success=False,
                error=str(e),
                url=url
            )

    async def _fetch_html(self, url: str) -> Optional[str]:
        """Fetch the page HTML, retrying up to ``settings.MAX_RETRIES`` times.

        The blocking requests call is pushed onto the default executor so the
        event loop is not stalled while waiting on the network.

        Returns:
            The decoded response body, or None after all retries failed.
        """
        loop = asyncio.get_running_loop()
        for attempt in range(settings.MAX_RETRIES):
            try:
                # Progressive back-off between retries.
                if attempt > 0:
                    await asyncio.sleep(settings.REQUEST_DELAY * (attempt + 1))

                # Rotate the User-Agent per attempt when rotation is enabled.
                if self.ua:
                    self.session.headers['User-Agent'] = self.ua.random

                # requests is synchronous; run it off-loop to avoid blocking.
                response = await loop.run_in_executor(
                    None,
                    lambda: self.session.get(
                        url,
                        timeout=settings.REQUEST_TIMEOUT,
                        allow_redirects=True
                    )
                )

                response.raise_for_status()

                # Prefer the encoding detected from the body over the header.
                response.encoding = response.apparent_encoding or 'utf-8'

                return response.text

            except requests.exceptions.RequestException as e:
                logger.warning(f"请求失败 (尝试 {attempt + 1}/{settings.MAX_RETRIES}) {url}: {e}")
                if attempt == settings.MAX_RETRIES - 1:
                    return None

        return None

    def _extract_content_with_newspaper(self, url: str, html_content: str) -> Dict[str, Any]:
        """Extract article fields with newspaper3k; return {} on failure."""
        # newspaper3k is optional (see import guard at the top of the file).
        if Article is None:
            logger.warning("newspaper3k 未安装，跳过 newspaper 提取")
            return {}
        try:
            article = Article(url, language='zh')
            article.set_html(html_content)
            article.parse()

            data = {
                'title': article.title or '',
                'content': article.text or '',
                'author': ', '.join(article.authors) if article.authors else '',
                'publish_date': article.publish_date,
                'meta_description': article.meta_description or '',
            }

            # summary/keywords are only populated by nlp(); it can fail
            # (e.g. missing NLTK corpora), so fall back to a text prefix.
            try:
                article.nlp()
                data['summary'] = article.summary or ''
                data['keywords'] = article.keywords or []
            except Exception:
                text = data['content']
                data['summary'] = text[:200] + '...' if len(text) > 200 else text
                data['keywords'] = []

            return data

        except Exception as e:
            logger.error(f"newspaper提取失败: {e}")
            return {}

    def _extract_content_with_bs4(self, html_content: str, url: str) -> Dict[str, Any]:
        """Heuristic content extraction with BeautifulSoup (fallback path)."""
        try:
            # lxml is faster but optional; degrade to the stdlib parser so a
            # missing lxml install does not kill the whole fallback path.
            try:
                soup = BeautifulSoup(html_content, 'lxml')
            except Exception:
                soup = BeautifulSoup(html_content, 'html.parser')

            # Drop boilerplate that never contains article text.
            for tag in soup(['script', 'style', 'nav', 'header', 'footer', 'aside']):
                tag.decompose()

            # Title: first selector yielding non-empty text wins.
            title = ''
            title_selectors = ['title', 'h1', '.title', '#title', '[class*="title"]']
            for selector in title_selectors:
                title_elem = soup.select_one(selector)
                if title_elem and title_elem.get_text(strip=True):
                    title = title_elem.get_text(strip=True)
                    break

            # Body: try common article containers until one is long enough.
            content = ''
            content_selectors = [
                'article', '.content', '.article-content', '.post-content',
                '#content', '.entry-content', '.article-body', 'main'
            ]

            for selector in content_selectors:
                content_elem = soup.select_one(selector)
                if content_elem:
                    # Strip ads / navigation inside the candidate container.
                    for ad in content_elem.select('.ad, .advertisement, .sidebar, .related'):
                        ad.decompose()

                    content = content_elem.get_text(strip=True, separator='\n')
                    if len(content) > settings.MIN_CONTENT_LENGTH:
                        break

            # Last resort: the whole page's visible text.
            if len(content) < settings.MIN_CONTENT_LENGTH:
                content = soup.get_text(strip=True, separator='\n')

            # <meta name="description"> when present.
            meta_description = ''
            meta_desc = soup.find('meta', attrs={'name': 'description'})
            if meta_desc and hasattr(meta_desc, 'get'):
                meta_description = meta_desc.get('content', '')

            return {
                'title': title,
                'content': content,
                'summary': content[:200] + '...' if len(content) > 200 else content,
                'author': '',
                'publish_date': None,
                'meta_description': meta_description,
                'keywords': []
            }

        except Exception as e:
            logger.error(f"BeautifulSoup提取失败: {e}")
            return {
                'title': '',
                'content': '',
                'summary': '',
                'author': '',
                'publish_date': None,
                'meta_description': '',
                'keywords': []
            }

    def _validate_content(self, article_data: Dict[str, Any]) -> bool:
        """Return True when the extracted content passes basic quality checks."""
        content = article_data.get('content', '')
        title = article_data.get('title', '')

        # Length bounds.
        if len(content) < settings.MIN_CONTENT_LENGTH:
            return False

        if len(content) > settings.MAX_CONTENT_LENGTH:
            return False

        # A real article needs a non-trivial title.
        if not title or len(title) < 5:
            return False

        # Heuristic: proper articles have some paragraph structure.
        if content.count('\n') < 2:
            return False

        return True

    def _create_article_model(self, article_data: Dict[str, Any], url: str, industry_keyword: str) -> "ArticleModel":
        """Assemble an ArticleModel from the extracted fields."""
        parsed_url = urlparse(url)

        return ArticleModel(
            url=url,
            title=article_data.get('title', ''),
            content=article_data.get('content', ''),
            summary=article_data.get('summary', ''),
            author=article_data.get('author', ''),
            publish_date=article_data.get('publish_date'),
            industry_keyword=industry_keyword,
            source_domain=parsed_url.netloc,
            content_length=len(article_data.get('content', '')),
            language='zh',
            meta_description=article_data.get('meta_description', ''),
            keywords=article_data.get('keywords', []),
            status='success',
            error_message=None
        )

    def close(self):
        """Close the underlying requests session."""
        self.session.close()