from urllib.parse import urlparse
import requests
from newspaper import Article
import chardet
from playwright.sync_api import sync_playwright

# Browser-like headers so simple bot filters don't reject the plain requests.
DEFAULT_HEADERS = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/91.0.4472.124 Safari/537.36"
    ),
}

class UrlFetcher:
    """Fetch a URL and extract article fields (title, text, date, ...).

    Strategy: a plain HTTP GET parsed with newspaper first; if any key
    field (title / text / publish_date) comes back empty, re-render the
    page with headless Chromium via Playwright and merge in whatever the
    rendered HTML yields.
    """

    def __init__(self, timeout=10):
        # timeout: per-request timeout in seconds (also reused, converted
        # to milliseconds, for the Playwright page.goto call).
        self.timeout = timeout
        self.session = requests.Session()
        self.session.headers.update(DEFAULT_HEADERS)

    def close(self):
        """Release the underlying HTTP session's connection pool."""
        self.session.close()

    def fetch(self, url):
        """Fetch *url* and return a dict of extracted article fields.

        Returns:
            {'status': 'success', 'title': ..., 'text': ..., ...} on
            success, or {'status': 'error', 'message': ...} on failure.
        """
        if not url.startswith(('http://', 'https://')):
            return {'status': 'error', 'message': 'Invalid URL format'}

        result = {'status': 'success'}
        try:
            # Stage 1: plain HTTP request + newspaper parse.
            response = self.session.get(url, timeout=self.timeout)
            response.raise_for_status()

            # requests' charset guess is often wrong (especially for
            # Chinese pages); sniff the raw bytes, fall back to UTF-8.
            detected_encoding = chardet.detect(response.content)['encoding']
            response.encoding = detected_encoding if detected_encoding else 'utf-8'

            result.update(self._parse_article(url, response.text))
            result['source_domain'] = urlparse(url).netloc

            # Stage 2: if a key field is missing, render the page with a
            # headless browser (handles JS-built content) and merge what
            # it found into the gaps.
            if any(not result.get(k) for k in ('title', 'text', 'publish_date')):
                headless_html = self._fetch_headless_content(url)
                if headless_html:
                    headless_result = self._parse_article(url, headless_html)
                    for k in ('title', 'text', 'publish_date', 'authors',
                              'top_image', 'keywords', 'summary'):
                        # Keep the stage-1 value when present; otherwise
                        # take the headless one.
                        result[k] = result.get(k) or headless_result.get(k)
        except requests.exceptions.RequestException as e:
            result = {'status': 'error', 'message': f'Request failed: {str(e)}'}
        except Exception as e:
            result = {'status': 'error', 'message': f'Processing error: {str(e)}'}

        return result

    def _parse_article(self, url, html):
        """Run newspaper over pre-fetched *html* and return its fields."""
        article = Article(url, language='zh')
        article.download(input_html=html)
        article.parse()
        try:
            # keywords/summary are only populated by nlp(); it depends on
            # NLTK data that may be absent, so treat it as best-effort.
            article.nlp()
        except Exception:
            pass

        return {
            'title': article.title,
            'text': article.text,
            # BUGFIX: str(None) == 'None' is truthy, which silently
            # defeated the missing-field check in fetch(); keep None
            # when newspaper found no date.
            'publish_date': str(article.publish_date) if article.publish_date else None,
            'authors': article.authors,
            'top_image': article.top_image,
            'keywords': article.keywords,
            'summary': article.summary,
        }

    def _fetch_headless_content(self, url):
        """Render *url* in headless Chromium and return the final HTML.

        Returns None on any navigation/timeout error — this is a
        best-effort fallback, not a hard failure.
        """
        with sync_playwright() as p:
            browser = p.chromium.launch(
                headless=True,
                args=[
                    # Reduce the most common headless-detection signals.
                    '--disable-blink-features=AutomationControlled',
                    '--disable-web-security',
                    '--disable-features=IsolateOrigins,site-per-process',
                    '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                ]
            )
            context = browser.new_context(
                java_script_enabled=True,
                user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
            )
            page = context.new_page()
            # Mask navigator properties that betray automation.
            page.add_init_script("""
                delete navigator.__proto__.webdriver;
                window.navigator.chrome = { runtime: {} };
                Object.defineProperty(navigator, 'plugins', { get: () => [1, 2, 3] });
            """)

            try:
                page.goto(url, timeout=self.timeout * 1000, wait_until="domcontentloaded")
                # Wait for a likely content container; scroll to trigger
                # lazy-loaded content, then give it a moment to settle.
                page.wait_for_selector("article, .content, main", timeout=5000)
                page.mouse.wheel(0, 1000)
                page.wait_for_timeout(1000)
                return page.content()
            except Exception as e:
                print(f"Playwright error: {str(e)}")
                return None
            finally:
                context.close()
                browser.close()
