import asyncio
from playwright.async_api import async_playwright
from datetime import datetime

async def simple_scrape():
    """Scrape article links from wallstreetcn.com and save their contents.

    Launches a headless Chromium browser, collects every link matching
    ``/articles/`` from the homepage, then visits each article and writes
    its title plus the first 500 characters of its body text to a
    timestamped ``articles_*.txt`` file in the working directory.
    """
    async with async_playwright() as p:
        # Launch a headless browser session.
        browser = await p.chromium.launch(headless=True)
        page = await browser.new_page()

        try:
            print("正在访问华尔街见闻...")

            # Generous timeouts: the site is JS-heavy and can be slow to respond.
            page.set_default_navigation_timeout(60000)
            page.set_default_timeout(60000)

            # Wait only for DOM content; the full 'load' event often stalls
            # on third-party requests.
            await page.goto('https://wallstreetcn.com/', wait_until='domcontentloaded')
            print("页面基础内容已加载")

            # Give client-side rendering time to inject the article list.
            await page.wait_for_timeout(10000)

            # Collect every anchor that points at an article page.
            links = await page.evaluate('''() => {
                const articles = [];
                // 查找所有包含文章的链接
                document.querySelectorAll('a[href*="/articles/"]').forEach(link => {
                    const href = link.getAttribute('href');
                    if (href && href.includes('/articles/')) {
                        articles.push(href);
                    }
                });
                return articles;
            }''')

            # Normalize to absolute URLs and drop duplicates, preserving order.
            article_links = []
            for href in links:
                if not href.startswith('http'):
                    href = 'https://wallstreetcn.com' + href
                if href not in article_links:
                    article_links.append(href)
                    print(f"找到文章: {href}")

            if not article_links:
                print("未找到文章链接")
                return

            print(f"找到 {len(article_links)} 篇文章")

            # Timestamped filename so repeated runs never overwrite each other.
            filename = f"articles_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt"
            with open(filename, 'w', encoding='utf-8') as f:
                for i, url in enumerate(article_links, 1):
                    print(f"处理第 {i} 篇文章...")
                    try:
                        article_page = await browser.new_page()
                        try:
                            await article_page.goto(url, wait_until='domcontentloaded')
                            await article_page.wait_for_timeout(3000)

                            # Extract title and body text; fall back to the whole
                            # document body when no <article> element exists.
                            content = await article_page.evaluate('''() => {
                                const title = document.querySelector('h1')?.innerText || '未找到标题';
                                const content = document.querySelector('article')?.innerText || document.body.innerText;
                                return {title, content};
                            }''')

                            f.write(f"文章 {i}: {url}\n")
                            f.write(f"标题: {content['title']}\n")
                            # Bug fix: keep only the first 500 characters — the
                            # original comment and trailing "..." promised
                            # truncation but the full text was written.
                            f.write(f"内容: {content['content'][:500]}...\n")
                            f.write("-" * 50 + "\n\n")
                        finally:
                            # Bug fix: always close the per-article page, even
                            # when goto/evaluate raises, so failed articles do
                            # not leak browser tabs.
                            await article_page.close()
                    except Exception as e:
                        print(f"处理文章失败: {e}")
                        f.write(f"文章 {i} 处理失败: {str(e)}\n")

            # Bug fix: the original printed a literal placeholder instead of
            # the actual output filename.
            print(f"完成！结果保存在: {filename}")

        except Exception as e:
            print(f"错误: {e}")
        finally:
            await browser.close()

def _main() -> None:
    """Script entry point: run the scraper in a fresh event loop."""
    asyncio.run(simple_scrape())


if __name__ == "__main__":
    _main()
