from playwright.sync_api import sync_playwright
import time

def _first_text(page, selectors, default):
    """Return the stripped text of the first matching selector, else *default*.

    Tries each selector in order; the first one with at least one match wins.
    Fixes a latent bug: Playwright's ``text_content()`` may return ``None``
    (e.g. for an element with no text node), and the previous code called
    ``.strip()`` on it, raising ``AttributeError`` and skipping the item.

    Args:
        page: Playwright ``Page`` (or anything exposing ``locator``).
        selectors: iterable of selector strings, tried in priority order.
        default: fallback string when nothing matches or the text is empty.
    """
    for selector in selectors:
        locator = page.locator(selector)
        if locator.count() > 0:
            text = locator.first.text_content()
            # None or empty text falls through to the default label.
            return text.strip() if text else default
    return default


def scrape_36kr_news():
    """Scrape up to 20 newsflash items from 36kr.com via absolute XPaths.

    Uses brittle absolute XPath selectors (tied to the current page layout)
    with class-based XPath fallbacks per item. Prints each item's title and
    body to stdout. Errors are printed, never raised to the caller; the
    browser is always closed.
    """
    with sync_playwright() as p:
        # Headless by default; set headless=False to watch the automation.
        browser = p.chromium.launch(headless=True)
        page = browser.new_page()

        try:
            print("正在访问页面...")
            page.goto('https://36kr.com/newsflashes/', wait_until='networkidle')

            # Wait until the first news item renders (content is dynamic).
            print("等待内容加载...")
            page.wait_for_selector('//*[@id="app"]/div/div[2]/div[3]/div/div/div[1]/div[3]/div/div[1]/div[1]', timeout=10000)

            # Extra settle time for late-loading content.
            time.sleep(3)

            # Collect the selectors of items div[1]..div[20] that exist.
            news_items = []
            for i in range(1, 21):
                div_selector = f'//*[@id="app"]/div/div[2]/div[3]/div/div/div[1]/div[3]/div/div[1]/div[{i}]'

                if page.locator(div_selector).count() > 0:
                    news_items.append(div_selector)
                else:
                    print(f"警告: 元素 div[{i}] 未找到")

            print(f"找到 {len(news_items)} 个新闻项")
            print("=" * 50)

            # Extract title and body for each collected item, with fallbacks.
            for index, item_selector in enumerate(news_items, 1):
                try:
                    title = _first_text(
                        page,
                        (
                            f'{item_selector}/div[3]/div[2]/div/a',
                            f'{item_selector}//a[contains(@class, "item-title")]',
                        ),
                        default="标题未找到",
                    )
                    content = _first_text(
                        page,
                        (
                            f'{item_selector}/div[3]/div[2]/div/div[2]/span',
                            f'{item_selector}//div[contains(@class, "item-desc")]//span',
                        ),
                        default="正文未找到",
                    )

                    print(f"新闻 {index}:")
                    print(f"标题: {title}")
                    print(f"正文: {content}")
                    print("-" * 50)

                except Exception as e:
                    # Best-effort: report and move on to the next item.
                    print(f"提取第 {index} 个新闻时出错: {e}")
                    continue

        except Exception as e:
            print(f"访问页面时出错: {e}")

        finally:
            # Always release the browser, even on failure.
            browser.close()

# 更稳定的版本 - 使用类名选择器
def scrape_36kr_news_stable():
    """Scrape up to 20 newsflash items from 36kr.com using class selectors.

    More robust than the XPath variant: class-name selectors survive minor
    layout changes. Prints each item's title and body to stdout; errors are
    printed, never raised. The browser is always closed.
    """
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=True)
        page = browser.new_page()

        try:
            print("正在访问页面...")
            page.goto('https://36kr.com/newsflashes/', wait_until='networkidle')

            # Class selectors are more stable than absolute XPaths.
            print("等待内容加载...")
            page.wait_for_selector('.newsflash-item', timeout=10000)
            time.sleep(3)

            # Materialize per-item locators for the rendered news items.
            news_items = page.locator('.newsflash-item').all()
            print(f"找到 {len(news_items)} 个新闻项")
            print("=" * 50)

            # Process at most the first 20 items.
            for index, item in enumerate(news_items[:20], 1):
                try:
                    # Check the match count before narrowing with .first,
                    # and guard against text_content() returning None
                    # (previously raised AttributeError on .strip()).
                    title_locator = item.locator('a.item-title')
                    if title_locator.count() > 0:
                        raw_title = title_locator.first.text_content()
                        title = raw_title.strip() if raw_title else "标题未找到"
                    else:
                        title = "标题未找到"

                    content_locator = item.locator('.item-desc span')
                    if content_locator.count() > 0:
                        raw_content = content_locator.first.text_content()
                        content = raw_content.strip() if raw_content else "正文未找到"
                    else:
                        content = "正文未找到"

                    print(f"新闻 {index}:")
                    print(f"标题: {title}")
                    print(f"正文: {content}")
                    print("-" * 50)

                except Exception as e:
                    # Best-effort: report and continue with the next item.
                    print(f"提取第 {index} 个新闻时出错: {e}")
                    continue

        except Exception as e:
            print(f"访问页面时出错: {e}")

        finally:
            # Always release the browser, even on failure.
            browser.close()

if __name__ == "__main__":
    print("开始抓取36氪快讯...")

    # Run the XPath-based scraper first; if it raises, fall back to the
    # class-selector variant as a second attempt.
    try:
        scrape_36kr_news()
    except Exception as error:
        print(f"第一种方法失败: {error}")
        print("\n尝试第二种方法...")
        scrape_36kr_news_stable()
